From ea6e9f22b97177ae751913f505f4113182e06067 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Tue, 23 Feb 2021 00:27:22 +0100 Subject: [PATCH 1/5] Vendor metrics --- go.sum | 8 + vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/exampledata.txt | 2388 +++++++ .../beorn7/perks/quantile/stream.go | 316 + vendor/github.com/blang/semver/.travis.yml | 21 + vendor/github.com/blang/semver/LICENSE | 22 + vendor/github.com/blang/semver/README.md | 194 + vendor/github.com/blang/semver/json.go | 23 + vendor/github.com/blang/semver/package.json | 17 + vendor/github.com/blang/semver/range.go | 416 ++ vendor/github.com/blang/semver/semver.go | 418 ++ vendor/github.com/blang/semver/sort.go | 28 + vendor/github.com/blang/semver/sql.go | 30 + .../github.com/cespare/xxhash/v2/.travis.yml | 8 + .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/v2/README.md | 67 + vendor/github.com/cespare/xxhash/v2/go.mod | 3 + vendor/github.com/cespare/xxhash/v2/go.sum | 0 vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 + .../cespare/xxhash/v2/xxhash_amd64.go | 13 + .../cespare/xxhash/v2/xxhash_amd64.s | 215 + .../cespare/xxhash/v2/xxhash_other.go | 76 + .../cespare/xxhash/v2/xxhash_safe.go | 15 + .../cespare/xxhash/v2/xxhash_unsafe.go | 46 + .../golang_protobuf_extensions/LICENSE | 201 + .../golang_protobuf_extensions/NOTICE | 1 + .../pbutil/.gitignore | 1 + .../pbutil/Makefile | 7 + .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 23 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 1 + .../client_golang/prometheus/build_info.go | 29 + .../prometheus/build_info_pre_1.12.go | 22 + .../client_golang/prometheus/collector.go | 120 + .../client_golang/prometheus/counter.go | 321 + .../client_golang/prometheus/desc.go | 186 + .../client_golang/prometheus/doc.go | 199 + .../prometheus/expvar_collector.go | 119 + .../client_golang/prometheus/fnv.go | 42 + .../client_golang/prometheus/gauge.go | 289 + .../client_golang/prometheus/go_collector.go | 396 ++ .../client_golang/prometheus/histogram.go | 637 ++ .../prometheus/internal/metric.go | 85 + .../client_golang/prometheus/labels.go | 87 + .../client_golang/prometheus/metric.go | 176 + .../client_golang/prometheus/observer.go | 64 + .../prometheus/process_collector.go | 151 + .../prometheus/process_collector_other.go | 65 + .../prometheus/process_collector_windows.go | 116 + .../prometheus/promhttp/delegator.go | 370 + .../client_golang/prometheus/promhttp/http.go | 379 + .../prometheus/promhttp/instrument_client.go | 219 + .../prometheus/promhttp/instrument_server.go | 447 ++ .../client_golang/prometheus/registry.go | 948 +++ .../client_golang/prometheus/summary.go | 737 ++ .../client_golang/prometheus/timer.go | 54 + .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 205 + .../client_golang/prometheus/vec.go | 484 ++ .../client_golang/prometheus/wrap.go | 212 + .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 723 ++ vendor/github.com/prometheus/common/LICENSE | 201 + vendor/github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 429 ++ .../prometheus/common/expfmt/encode.go | 162 + .../prometheus/common/expfmt/expfmt.go | 41 + .../prometheus/common/expfmt/fuzz.go | 36 + 
.../common/expfmt/openmetrics_create.go | 527 ++ .../prometheus/common/expfmt/text_create.go | 465 ++ .../prometheus/common/expfmt/text_parse.go | 764 ++ .../bitbucket.org/ww/goautoneg/README.txt | 67 + .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 102 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 274 + .../prometheus/common/model/value.go | 416 ++ .../github.com/prometheus/procfs/.gitignore | 1 + .../prometheus/procfs/.golangci.yml | 4 + .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../prometheus/procfs/CONTRIBUTING.md | 121 + vendor/github.com/prometheus/procfs/LICENSE | 201 + .../prometheus/procfs/MAINTAINERS.md | 2 + vendor/github.com/prometheus/procfs/Makefile | 29 + .../prometheus/procfs/Makefile.common | 300 + vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 61 + vendor/github.com/prometheus/procfs/arp.go | 85 + .../github.com/prometheus/procfs/buddyinfo.go | 85 + .../github.com/prometheus/procfs/cpuinfo.go | 464 ++ .../prometheus/procfs/cpuinfo_armx.go | 19 + .../prometheus/procfs/cpuinfo_mipsx.go | 19 + .../prometheus/procfs/cpuinfo_others.go | 19 + .../prometheus/procfs/cpuinfo_ppcx.go | 19 + .../prometheus/procfs/cpuinfo_s390x.go | 18 + .../prometheus/procfs/cpuinfo_x86.go | 19 + vendor/github.com/prometheus/procfs/crypto.go | 153 + vendor/github.com/prometheus/procfs/doc.go | 45 + .../prometheus/procfs/fixtures.ttar | 6185 +++++++++++++++++ vendor/github.com/prometheus/procfs/fs.go | 43 + .../github.com/prometheus/procfs/fscache.go | 422 ++ vendor/github.com/prometheus/procfs/go.mod | 9 + vendor/github.com/prometheus/procfs/go.sum | 6 + .../prometheus/procfs/internal/fs/fs.go | 55 + .../prometheus/procfs/internal/util/parse.go | 97 + .../procfs/internal/util/readfile.go | 38 + .../procfs/internal/util/sysreadfile.go | 48 + .../internal/util/sysreadfile_compat.go | 26 + .../procfs/internal/util/valueparser.go | 91 + vendor/github.com/prometheus/procfs/ipvs.go | 241 + .../prometheus/procfs/kernel_random.go | 62 + .../github.com/prometheus/procfs/loadavg.go | 62 + vendor/github.com/prometheus/procfs/mdstat.go | 197 + .../github.com/prometheus/procfs/meminfo.go | 277 + .../github.com/prometheus/procfs/mountinfo.go | 180 + .../prometheus/procfs/mountstats.go | 629 ++ .../prometheus/procfs/net_conntrackstat.go | 153 + .../github.com/prometheus/procfs/net_dev.go | 205 + .../prometheus/procfs/net_sockstat.go | 163 + .../prometheus/procfs/net_softnet.go | 102 + .../github.com/prometheus/procfs/net_udp.go | 229 + .../github.com/prometheus/procfs/net_unix.go | 257 + vendor/github.com/prometheus/procfs/proc.go | 319 + .../prometheus/procfs/proc_cgroup.go | 98 + .../prometheus/procfs/proc_environ.go | 37 + .../prometheus/procfs/proc_fdinfo.go | 133 + .../github.com/prometheus/procfs/proc_io.go | 59 + .../prometheus/procfs/proc_limits.go | 157 + .../github.com/prometheus/procfs/proc_maps.go | 209 + .../github.com/prometheus/procfs/proc_ns.go | 68 + .../github.com/prometheus/procfs/proc_psi.go | 100 + .../prometheus/procfs/proc_smaps.go | 165 + .../github.com/prometheus/procfs/proc_stat.go | 192 + 
.../prometheus/procfs/proc_status.go | 170 + .../github.com/prometheus/procfs/schedstat.go | 118 + vendor/github.com/prometheus/procfs/stat.go | 244 + vendor/github.com/prometheus/procfs/swaps.go | 89 + vendor/github.com/prometheus/procfs/ttar | 413 ++ vendor/github.com/prometheus/procfs/vm.go | 210 + vendor/github.com/prometheus/procfs/xfrm.go | 187 + .../github.com/prometheus/procfs/zoneinfo.go | 196 + vendor/k8s.io/component-base/metrics/OWNERS | 10 + .../component-base/metrics/collector.go | 190 + .../k8s.io/component-base/metrics/counter.go | 178 + vendor/k8s.io/component-base/metrics/desc.go | 225 + vendor/k8s.io/component-base/metrics/gauge.go | 193 + .../component-base/metrics/histogram.go | 177 + vendor/k8s.io/component-base/metrics/http.go | 77 + .../k8s.io/component-base/metrics/labels.go | 22 + .../metrics/legacyregistry/registry.go | 86 + .../k8s.io/component-base/metrics/metric.go | 228 + .../k8s.io/component-base/metrics/options.go | 79 + vendor/k8s.io/component-base/metrics/opts.go | 245 + .../metrics/processstarttime.go | 67 + .../k8s.io/component-base/metrics/registry.go | 329 + .../k8s.io/component-base/metrics/summary.go | 171 + vendor/k8s.io/component-base/metrics/value.go | 60 + .../k8s.io/component-base/metrics/version.go | 37 + .../component-base/metrics/version_parser.go | 50 + .../k8s.io/component-base/metrics/wrappers.go | 95 + .../component-base/version/.gitattributes | 1 + vendor/k8s.io/component-base/version/base.go | 63 + vendor/k8s.io/component-base/version/def.bzl | 39 + .../k8s.io/component-base/version/version.go | 42 + vendor/modules.txt | 25 + 177 files changed, 35188 insertions(+) create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/blang/semver/.travis.yml create mode 100644 vendor/github.com/blang/semver/LICENSE create mode 100644 vendor/github.com/blang/semver/README.md create mode 100644 vendor/github.com/blang/semver/json.go create mode 100644 vendor/github.com/blang/semver/package.json create mode 100644 vendor/github.com/blang/semver/range.go create mode 100644 vendor/github.com/blang/semver/semver.go create mode 100644 vendor/github.com/blang/semver/sort.go create mode 100644 vendor/github.com/blang/semver/sql.go create mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/go.mod create mode 100644 vendor/github.com/cespare/xxhash/v2/go.sum create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile create mode 100644 
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 
vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/.gitignore create mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/arp.go create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_armx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_others.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_x86.go create mode 100644 vendor/github.com/prometheus/procfs/crypto.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/fscache.go create mode 100644 vendor/github.com/prometheus/procfs/go.mod create mode 100644 vendor/github.com/prometheus/procfs/go.sum create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go create mode 100644 
vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/meminfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/swaps.go create mode 100644 vendor/github.com/prometheus/procfs/ttar create mode 100644 vendor/github.com/prometheus/procfs/vm.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go create mode 100644 vendor/k8s.io/component-base/metrics/OWNERS create mode 100644 vendor/k8s.io/component-base/metrics/collector.go create mode 100644 vendor/k8s.io/component-base/metrics/counter.go create mode 100644 vendor/k8s.io/component-base/metrics/desc.go create mode 100644 vendor/k8s.io/component-base/metrics/gauge.go create mode 100644 vendor/k8s.io/component-base/metrics/histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/http.go create mode 100644 vendor/k8s.io/component-base/metrics/labels.go create mode 100644 vendor/k8s.io/component-base/metrics/legacyregistry/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/metric.go create mode 100644 vendor/k8s.io/component-base/metrics/options.go create mode 100644 vendor/k8s.io/component-base/metrics/opts.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime.go create mode 100644 vendor/k8s.io/component-base/metrics/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/summary.go create mode 100644 vendor/k8s.io/component-base/metrics/value.go create mode 100644 vendor/k8s.io/component-base/metrics/version.go create mode 100644 vendor/k8s.io/component-base/metrics/version_parser.go create mode 100644 
vendor/k8s.io/component-base/metrics/wrappers.go create mode 100644 vendor/k8s.io/component-base/version/.gitattributes create mode 100644 vendor/k8s.io/component-base/version/base.go create mode 100644 vendor/k8s.io/component-base/version/def.bzl create mode 100644 vendor/k8s.io/component-base/version/version.go diff --git a/go.sum b/go.sum index 4fe5d5b020..4a265c3f17 100644 --- a/go.sum +++ b/go.sum @@ -55,10 +55,13 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -241,6 +244,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -274,16 +278,20 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 0000000000..339177be66 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 0000000000..1602287d7c --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 
+3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 
+9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 0000000000..d7d14f8eb6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
+func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 0000000000..102fb9a691 --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) 
+env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE new file mode 100644 index 0000000000..5ba5c86fcb --- /dev/null +++ b/vendor/github.com/blang/semver/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md new file mode 100644 index 0000000000..08b2e4a3d7 --- /dev/null +++ b/vendor/github.com/blang/semver/README.md @@ -0,0 +1,194 @@ +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) +====== + +semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. + +Usage +----- +```bash +$ go get github.com/blang/semver +``` +Note: Always vendor your dependencies or fix on a specific version tag. + +```go +import github.com/blang/semver +v1, err := semver.Make("1.0.0-beta") +v2, err := semver.Make("2.0.0-beta") +v1.Compare(v2) +``` + +Also check the [GoDocs](http://godoc.org/github.com/blang/semver). + +Why should I use this lib? +----- + +- Fully spec compatible +- No reflection +- No regex +- Fully tested (Coverage >99%) +- Readable parsing/validation errors +- Fast (See [Benchmarks](#benchmarks)) +- Only Stdlib +- Uses values instead of pointers +- Many features, see below + + +Features +----- + +- Parsing and validation at all levels +- Comparator-like comparisons +- Compare Helper Methods +- InPlace manipulation +- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` +- Wildcards `>=1.x`, `<=2.5.x` +- Sortable (implements sort.Interface) +- database/sql compatible (sql.Scanner/Valuer) +- encoding/json compatible (json.Marshaler/Unmarshaler) + +Ranges +------ + +A `Range` is a set of conditions which specify which versions satisfy the range. + +A condition is composed of an operator and a version. 
The supported operators are: + +- `<1.0.0` Less than `1.0.0` +- `<=1.0.0` Less than or equal to `1.0.0` +- `>1.0.0` Greater than `1.0.0` +- `>=1.0.0` Greater than or equal to `1.0.0` +- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` +- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. + +Note that spaces between the operator and the version will be gracefully tolerated. + +A `Range` can link multiple `Ranges` separated by space: + +Ranges can be linked by logical AND: + + - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` + - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` + +Ranges can also be linked by logical OR: + + - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` + +AND has a higher precedence than OR. It's not possible to use brackets. + +Ranges can be combined by both AND and OR + + - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` + +Range usage: + +``` +v, err := semver.Parse("1.2.3") +range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") +if range(v) { + //valid +} + +``` + +Example +----- + +Have a look at full examples in [examples/main.go](examples/main.go) + +```go +import github.com/blang/semver + +v, err := semver.Make("0.0.1-alpha.preview+123.github") +fmt.Printf("Major: %d\n", v.Major) +fmt.Printf("Minor: %d\n", v.Minor) +fmt.Printf("Patch: %d\n", v.Patch) +fmt.Printf("Pre: %s\n", v.Pre) +fmt.Printf("Build: %s\n", v.Build) + +// Prerelease versions array +if len(v.Pre) > 0 { + fmt.Println("Prerelease versions:") + for i, pre := range v.Pre { + fmt.Printf("%d: %q\n", i, pre) + } +} + +// Build meta data array +if len(v.Build) > 0 { + fmt.Println("Build meta data:") + for i, build := range v.Build { + fmt.Printf("%d: %q\n", i, build) + } +} + +v001, err := semver.Make("0.0.1") +// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE +v001.GT(v) == true +v.LT(v001) == true +v.GTE(v) == true +v.LTE(v) == true + +// Or use v.Compare(v2) for comparisons (-1, 0, 1): +v001.Compare(v) == 1 +v.Compare(v001) == -1 +v.Compare(v) == 0 + +// Manipulate Version in place: +v.Pre[0], err = semver.NewPRVersion("beta") +if err != nil { + fmt.Printf("Error parsing pre release version: %q", err) +} + +fmt.Println("\nValidate versions:") +v.Build[0] = "?" 
+ +err = v.Validate() +if err != nil { + fmt.Printf("Validation failed: %s\n", err) +} +``` + + +Benchmarks +----- + + BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op + BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op + BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op + BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op + BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op + BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op + BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op + BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op + BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op + BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op + BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op + BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op + BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op + BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op + BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op + BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op + +See benchmark cases at [semver_test.go](semver_test.go) + + +Motivation +----- + +I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. + + +Contribution +----- + +Feel free to make a pull request. For bigger changes create a issue first to discuss about it. + + +License +----- + +See [LICENSE](LICENSE) file. diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go new file mode 100644 index 0000000000..a74bf7c449 --- /dev/null +++ b/vendor/github.com/blang/semver/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. 
+func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 0000000000..1cf8ebdd9c --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.5.1" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 0000000000..fca406d479 --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. 
+// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. +func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMajorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >= 
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Index(ap, "x") != -1 { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go new file mode 100644 index 0000000000..8ee0842e6a --- /dev/null +++ b/vendor/github.com/blang/semver/semver.go @@ -0,0 +1,418 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precendence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) 
+ } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. +func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all pr versions are the equal but one has further prversion, this one greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// Validate validates v and returns error in case +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { //Numeric prerelease versions already uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if !containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (vp *Version, err error) { + v, err := Parse(s) + vp = &v + return +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// 
specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions +// with only major and minor components specified +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + s = strings.Join(parts, ".") + } + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. 
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go new file mode 100644 index 0000000000..e18f880826 --- /dev/null +++ b/vendor/github.com/blang/semver/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go new file mode 100644 index 0000000000..eb4d802666 --- /dev/null +++ b/vendor/github.com/blang/semver/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 0000000000..c516ea88da --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 0000000000..24b53065f4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 0000000000..2fd8693c21 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 0000000000..49f67608bf --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 0000000000..db0b35fbe3 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. 
Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 0000000000..ad14b807f4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 0000000000..d580e32aed --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. 
+ SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 0000000000..4a5a821603 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 0000000000..fc9bea7a31 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 0000000000..53bf76efbc --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 0000000000..5d8cb5b72e --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore new file mode 100644 index 0000000000..e16fb946bb --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore @@ -0,0 +1 @@ +cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile new file mode 100644 index 0000000000..81be214370 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile @@ -0,0 +1,7 @@ +all: + +cover: + go test -cover -v -coverprofile=cover.dat ./... + go tool cover -func cover.dat + +.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 0000000000..258c0636aa --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. 
+// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 0000000000..c318385cbe --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 0000000000..8fb59ad226 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. +func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 0000000000..dd878a30ee --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). 
+https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 0000000000..3460f0346d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 0000000000..44986bff06 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 0000000000..288f0e8548 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go new file mode 100644 index 0000000000..6609e2877c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.12 + +package prometheus + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. 
+func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 0000000000..1e839650d4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. 
Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. + Collect(chan<- Metric) +} + +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. +// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 0000000000..0e1b48c03f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,321 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. 
That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// ExemplarAdder is implemented by Counters that offer the option of adding a +// value to the Counter together with an exemplar. Its AddWithExemplar method +// works like the Add method of the Counter interface but also replaces the +// currently saved exemplar (if any) with a new one, created from the provided +// value, the current time as timestamp, and the provided labels. Empty Labels +// will lead to a valid (label-less) exemplar. But if Labels is nil, the current +// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any +// of the provided labels are invalid, or if the provided labels contain more +// than 64 runes in total. +type ExemplarAdder interface { + AddWithExemplar(value float64, exemplar Labels) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation also implements ExemplarAdder. It is safe to +// perform the corresponding type assertion. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair + exemplar atomic.Value // Containing nil or a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. 
+} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) AddWithExemplar(v float64, e Labels) { + c.Add(v) + c.updateExemplar(v, e) +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + var exemplar *dto.Exemplar + if e := c.exemplar.Load(); e != nil { + exemplar = e.(*dto.Exemplar) + } + + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) +} + +func (c *counter) updateExemplar(v float64, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, c.now(), l) + if err != nil { + panic(err) + } + c.exemplar.Store(e) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. 
For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. 
+type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +// +// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 0000000000..2f19f5e1e7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,186 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/cespare/xxhash/v2" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. 
This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + xxh := xxhash.New() + for _, val := range labelValues { + xxh.WriteString(val) + xxh.Write(separatorByteSlice) + } + d.id = xxh.Sum64() + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. 
+ xxh.Reset() + xxh.WriteString(help) + xxh.Write(separatorByteSlice) + for _, labelName := range labelNames { + xxh.WriteString(labelName) + xxh.Write(separatorByteSlice) + } + d.dimHash = xxh.Sum64() + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(labelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 0000000000..98450125d6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,199 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. 
+// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// and HistogramVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and +// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, +// and HistogramVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). 
+// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). NewConstMetric is used +// for all metric types with just a float64 as their value: Counter, Gauge, and +// a special “type” called Untyped. Use the latter if you are not sure if the +// mirrored metric is a Counter or a Gauge. Creation of the Metric instance +// happens in the Collect method. The Describe method has to return separate +// Desc instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. Alternatively, +// you could return no Desc at all, which will mark the Collector “unchecked”. +// No checks are performed at registration time, but metric consistency will +// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. 
The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 0000000000..18a99d5faa --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. 
for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 0000000000..3d383a735c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 0000000000..d67573f767 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,289 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. 
+ return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. 
For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. 
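A minimal usage sketch of the Gauge and GaugeVec API documented above (editorial illustration, not part of the vendored file): the metric and label names are made up, and registration goes through the library's default registry via prometheus.MustRegister.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Plain Gauge: Set-heavy usage, as recommended in the NewGauge docs.
	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_queue_depth",
		Help: "Current number of items waiting in the queue.",
	})
	prometheus.MustRegister(queueDepth)
	queueDepth.Set(42)

	// GaugeVec: the same kind of value partitioned by a variable label.
	inFlight := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "example_in_flight_requests",
		Help: "In-flight requests, partitioned by handler.",
	}, []string{"handler"})
	prometheus.MustRegister(inFlight)

	inFlight.WithLabelValues("/api").Inc()
	// ... handle the request ...
	inFlight.WithLabelValues("/api").Dec()
}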
+type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. Therefore, it must be safe to call the provided function +// concurrently. +// +// NewGaugeFunc is a good way to create an “info” style metric with a constant +// value of 1. Example: +// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 0000000000..ea05cf429f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,396 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "runtime" + "runtime/debug" + "sync" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. +} + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. 
To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. (The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) +func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the pause duration of garbage collection cycles.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + 
memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return "go_memstats_" + s +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.msMetrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. 
+func (c *goCollector) Collect(ch chan<- Metric) { + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. + c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. 
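A minimal sketch of wiring the Go runtime collector and the build-info collector into a dedicated registry, per the NewGoCollector and NewBuildInfoCollector documentation above (editorial illustration; the listen address is arbitrary, and the exposition handler is assumed to be the promhttp package from the same library).

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		prometheus.NewGoCollector(),        // go_goroutines, go_gc_duration_seconds, go_memstats_*, ...
		prometheus.NewBuildInfoCollector(), // go_build_info
	)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}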
+func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 0000000000..d4ea301a33 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,637 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. 
+func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +// +// The returned implementation also implements ExemplarObserver. It is safe to +// perform the corresponding type assertion. Exemplars are tracked separately +// for each bucket. 
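A minimal sketch of the bucket helpers and NewHistogram described above (editorial illustration; the metric name and bucket layout are chosen for the example only).

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reqDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "example_request_duration_seconds",
		Help: "Request duration in seconds.",
		// 10 buckets from 5ms doubling up to ~2.56s; the +Inf bucket is added implicitly.
		Buckets: prometheus.ExponentialBuckets(0.005, 2, 10),
	})
	prometheus.MustRegister(reqDuration)

	start := time.Now()
	// ... handle the request ...
	reqDuration.Observe(time.Since(start).Seconds())
}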
+func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{{}, {}}, + now: time.Now, + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts as well as exemplars: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) + + h.init(h) // Init self-collection. + return h +} + +type histogramCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts + + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. 
+ + now func() time.Time // To mock out time.Now() for testing. +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + h.observe(v, h.findBucket(v)) +} + +func (h *histogram) ObserveWithExemplar(v float64, e Labels) { + i := h.findBucket(v) + h.observe(v, i) + h.updateExemplar(v, i, e) +} + +func (h *histogram) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + his.Bucket[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + if e := h.exemplars[i].Load(); e != nil { + his.Bucket[i].Exemplar = e.(*dto.Exemplar) + } + } + // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. + if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e.(*dto.Exemplar), + } + his.Bucket = append(his.Bucket, b) + } + + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// findBucket returns the index of the bucket for the provided value, or +// len(h.upperBounds) for the +Inf bucket. +func (h *histogram) findBucket(v float64) int { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. 
+ // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + return sort.SearchFloat64s(h.upperBounds, v) +} + +// observe is the implementation for Observe without the findBucket part. +func (h *histogram) observe(v float64, bucket int) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if bucket < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[bucket], 1) + } + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +// updateExemplar replaces the exemplar for the provided bucket. With empty +// labels, it's a no-op. It panics if any of the labels is invalid. +func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, h.now(), l) + if err != nil { + panic(err) + } + h.exemplars[bucket].Store(e) +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. 
Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. 
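A minimal sketch of label currying on a HistogramVec, following the CurryWith/MustCurryWith contract above (editorial illustration; the metric and label names are hypothetical).

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "example_handler_latency_seconds",
		Help:    "Handler latency partitioned by handler and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"handler", "method"})
	prometheus.MustRegister(latency)

	// Pre-set the "handler" label; only "method" remains variable on the curried vector.
	apiLatency := latency.MustCurryWith(prometheus.Labels{"handler": "/api"})
	apiLatency.WithLabelValues("GET").Observe(0.042)

	// The same observation on the uncurried vector:
	latency.WithLabelValues("/api", "GET").Observe(0.042)
}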
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstHistogram would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 0000000000..351c26e1ae --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 0000000000..2744443ac2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 0000000000..35bd8bde34 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,176 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + "time" + + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. 
This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. Callers of Write should + // still make sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name to a non-empty string. All other fields are +// optional and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. 
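A minimal sketch of how BuildFQName assembles a fully-qualified metric name from the Namespace/Subsystem/Name components described above (editorial illustration; the component values are made up).

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("myapp", "http", "requests_total")) // myapp_http_requests_total
	fmt.Println(prometheus.BuildFQName("myapp", "", "up"))                 // myapp_up
	fmt.Println(prometheus.BuildFQName("", "", ""))                        // empty name always yields ""
}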
+func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// labelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type labelPairSorter []*dto.LabelPair + +func (s labelPairSorter) Len() int { + return len(s) +} + +func (s labelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s labelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 0000000000..44128016fd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. 
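A minimal sketch of a custom Collector that mirrors an externally sampled value and attaches its original timestamp with NewMetricWithTimestamp, as described above (editorial illustration; the descriptor, value, and timestamp source are hypothetical).

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// mirrorCollector forwards a single sample obtained elsewhere, keeping the
// timestamp at which that sample was originally taken.
type mirrorCollector struct {
	desc *prometheus.Desc
}

func (c *mirrorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	// In a real mirror, value and timestamp would come from the remote source.
	sampledAt := time.Now().Add(-30 * time.Second)
	m := prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 123)
	ch <- prometheus.NewMetricWithTimestamp(sampledAt, m)
}

func main() {
	c := &mirrorCollector{desc: prometheus.NewDesc(
		"example_mirrored_value", "A value mirrored from another system.", nil, nil,
	)}
	prometheus.MustRegister(c)
}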
If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} + +// ExemplarObserver is implemented by Observers that offer the option of +// observing a value together with an exemplar. Its ObserveWithExemplar method +// works like the Observe method of an Observer but also replaces the currently +// saved exemplar (if any) with a new one, created from the provided value, the +// current time as timestamp, and the provided Labels. Empty Labels will lead to +// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is +// left in place. ObserveWithExemplar panics if any of the provided labels are +// invalid or if the provided labels contain more than 64 runes in total. +type ExemplarObserver interface { + ObserveWithExemplar(value float64, exemplar Labels) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 0000000000..9b80979421 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,151 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "os" +) + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). 
+ Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } + + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn + } + + // Set up process metric collection if supported by the runtime. + if canCollectProcess() { + c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } + } + + return c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. 
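+//
+// As an illustrative registration sketch (the default registry in this package
+// already registers a process collector, so explicit registration is only
+// needed for custom registries; the names below are illustrative):
+//
+//	reg := NewRegistry()
+//	reg.MustRegister(NewProcessCollector(ProcessCollectorOpts{Namespace: "myapp"}))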
+func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 0000000000..3117461cde --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 0000000000..f973398df2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // System interface description + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex + + // Refer to the Golang internal implementation + // https://golang.org/src/internal/syscall/windows/psapi_windows.go + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. +} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 0000000000..5070e72e28 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,370 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. + r.observeWriteHeader(code) + } + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
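+	// Each pickDelegator index is the bitwise OR of the flags defined in the
+	// const block above (closeNotifier=1, flusher=2, hijacker=4, readerFrom=8,
+	// pusher=16), so the 32 entries cover every combination of optional
+	// http.ResponseWriter interfaces the wrapped writer may implement.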
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 
+ return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + 
http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 0000000000..5e1c4546ce --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,379 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. 
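+//
+// As a minimal sketch (the listen address is illustrative), exposing the
+// default registry can look like this:
+//
+//	http.Handle("/metrics", promhttp.Handler())
+//	log.Fatal(http.ListenAndServe(":8080", nil)) // address is illustrative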
+package promhttp + +import ( + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilites that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + errCnt.WithLabelValues("gathering").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + // Still report the error if no metrics have been gathered. 
+ httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true + } + // Do nothing in all other cases, including ContinueOnError. + return false + } + + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } + } + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. + if handleError(closer.Close()) { + return + } + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. 
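+	// Calling WithLabelValues up front instantiates these child counters at 0,
+	// so the corresponding series exist from the first scrape onwards rather
+	// than appearing only after the first request with that status code.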
+ cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. Note that HTTP + // errors cannot be served anymore once the beginning of a regular + // payload has been sent. Thus, in the (unlikely) case that encoding the + // payload into the negotiated wire format fails, serving the response + // will simply be aborted. Set an ErrorLog in HandlerOpts to detect + // those errors. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. 
(2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration + // If true, the experimental OpenMetrics encoding is added to the + // possible options during content negotiation. Note that Prometheus + // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is + // the only way to transmit exemplars. However, the move to OpenMetrics + // is not completely transparent. Most notably, the values of "quantile" + // labels of Summaries and "le" labels of Histograms are formatted with + // a trailing ".0" if they would otherwise look like integer numbers + // (which changes the identity of the resulting series on the Prometheus + // server). + EnableOpenMetrics bool +} + +// gzipAccepted returns whether the client will accept gzip-encoded content. +func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents is +// supposed to be uncompressed plain text. Same as with a plain http.Error, this +// must not be called if the header or any payload has already been sent. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 0000000000..83c49b66a8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
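+//
+// A minimal composition sketch (metric and variable names are illustrative):
+//
+//	durations := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+//		Name: "client_request_duration_seconds", // name is illustrative
+//		Help: "Duration of outgoing HTTP requests.",
+//	}, []string{"method"})
+//	prometheus.MustRegister(durations)
+//	client := &http.Client{
+//		Transport: InstrumentRoundTripperDuration(durations, http.DefaultTransport),
+//	}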
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or to implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
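+//
+// A minimal sketch (metric and variable names are illustrative) that records
+// only the DNS lookup time:
+//
+//	dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
+//		Name: "client_dns_duration_seconds", // name is illustrative
+//		Help: "Time spent in DNS lookups.",
+//	})
+//	prometheus.MustRegister(dnsLatency)
+//	trace := &InstrumentTrace{DNSDone: dnsLatency.Observe}
+//	client := &http.Client{
+//		Transport: InstrumentRoundTripperTrace(trace, http.DefaultTransport),
+//	}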
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 0000000000..9db2438053 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. 
It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. 
The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. 
Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. 
+var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 0000000000..ba94405af4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,948 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "unicode/utf8" + + "github.com/cespare/xxhash/v2" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. Unchecked Collectors (those whose +// Describe method does not yield any descriptors) are excluded from the check. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather than the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. 
It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. 
+ Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. +type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error + +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() +} + +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs) is 1. In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + uncheckedCollectors []Collector + pedanticChecksEnabled bool +} + +// Register implements Registerer.
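+//
+// An editorial sketch of the usual recovery pattern for
+// AlreadyRegisteredError (a hedged addition, not upstream documentation;
+// requestCount is an assumed *CounterVec):
+//
+//    if err := Register(requestCount); err != nil {
+//        if are, ok := err.(AlreadyRegisteredError); ok {
+//            // Reuse the Collector that was registered first.
+//            requestCount = are.ExistingCollector.(*CounterVec)
+//        } else {
+//            panic(err)
+//        }
+//    }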
+func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // All desc IDs XOR'd together. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, XOR it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID ^= desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // A Collector yielding no Desc at all is considered unchecked. + if len(newDescIDs) == 0 { + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil + } + if existing, exists := r.collectorsByID[collectorID]; exists { + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // All desc IDs XOR'd together. 
+ ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID ^= desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) + for _, collector := range r.collectorsByID { + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + r.mtx.RUnlock() + + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-checkedCollectors: + collector.Collect(checkedMetricChan) + case collector := <-uncheckedCollectors: + collector.Collect(uncheckedMetricChan) + default: + return + } + wg.Done() + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close checkedMetricChan and uncheckedMetricChan once all collectors + // are collected. + go func() { + wg.Wait() + close(checkedMetricChan) + close(uncheckedMetricChan) + }() + + // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. + defer func() { + if checkedMetricChan != nil { + for range checkedMetricChan { + } + } + if uncheckedMetricChan != nil { + for range uncheckedMetricChan { + } + } + }() + + // Copy the channel references so we can nil them out later to remove + // them from the select statements below. + cmc := checkedMetricChan + umc := uncheckedMetricChan + + for { + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, + )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + default: + if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. 
Do the same as above, + // just without the default. + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, + )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + } + break + } + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() + } + // Once both checkedMetricChan and uncheckedMetricChan are closed + // and drained, the contraption above will nil out cmc and umc, + // and then we can leave the collect loop here. + if cmc == nil && umc == nil { + break + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. +// +// This is intended for use with the textfile collector of the node exporter. +// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calls are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. 
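+//
+// An editorial sketch of merging a custom Registry into the default
+// exposition (a hedged addition, not upstream documentation; customRegistry
+// is assumed):
+//
+//    g := Gatherers{DefaultGatherer, customRegistry}
+//    mfs, err := g.Gather()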
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } + } + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) + } + } + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } + } + return nil +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, +) error { + name := metricFamily.GetName() + + // Type consistency with metric family. 
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), + ) + } + + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := xxhash.New() + h.WriteString(name) + h.Write(separatorByteSlice) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. + copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } + for _, lp := range dtoMetric.Label { + h.WriteString(lp.GetName()) + h.Write(separatorByteSlice) + h.WriteString(lp.GetValue()) + h.Write(separatorByteSlice) + } + hSum := h.Sum64() + if _, exists := metricHashes[hSum]; exists { + return fmt.Errorf( + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, + ) + } + metricHashes[hSum] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? 
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(labelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 0000000000..f3c1440d1c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,737 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/beorn7/perks/quantile" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. 
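+ //
+ // An editorial sketch (a hedged addition, not upstream documentation;
+ // requestDurations and start are assumed):
+ //
+ //    requestDurations.Observe(time.Since(start).Seconds())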
+ Observe(float64) +} + +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v1.0.0 of the library. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is an empty map, resulting in a summary without + // quantiles. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. 
If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 +} + +// Problem with the sliding-window decay algorithm... The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = map[float64]float64{} + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{{}, {}}, + } + s.init(s) // Init self-collection. + return s + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. 
+ + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. 
Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. 
+ atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
+ if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. 
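+//
+// An editorial sketch (a hedged addition, not upstream documentation; the
+// metric and label names are assumed):
+//
+//    v := NewSummaryVec(SummaryOpts{Name: "request_duration_seconds"}, []string{"handler", "method"})
+//    byHandler := v.MustCurryWith(Labels{"handler": "index"})
+//    byHandler.WithLabelValues("GET").Observe(0.42)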
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. +func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 0000000000..8d5f105233 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. 
Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 0000000000..0f9ce63f40 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 0000000000..6206928cc6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,205 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + "time" + "unicode/utf8" + + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + + dto "github.com/prometheus/client_model/go" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. Use UntypedValue to mark a metric +// with an unknown type. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, nil, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + e *dto.Exemplar, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + labelPairs = append(labelPairs, desc.constLabelPairs...) + sort.Sort(labelPairSorter(labelPairs)) + return labelPairs +} + +// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. +const ExemplarMaxRunes = 64 + +// newExemplar creates a new dto.Exemplar from the provided values. An error is +// returned if any of the label names or values are invalid or if the total +// number of runes in the label names and values exceeds ExemplarMaxRunes. +func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { + e := &dto.Exemplar{} + e.Value = proto.Float64(value) + tsProto, err := ptypes.TimestampProto(ts) + if err != nil { + return nil, err + } + e.Timestamp = tsProto + labelPairs := make([]*dto.LabelPair, 0, len(l)) + var runes int + for name, value := range l { + if !checkLabelName(name) { + return nil, fmt.Errorf("exemplar label name %q is invalid", name) + } + runes += utf8.RuneCountInString(name) + if !utf8.ValidString(value) { + return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) + } + runes += utf8.RuneCountInString(value) + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(name), + Value: proto.String(value), + }) + } + if runes > ExemplarMaxRunes { + return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) + } + e.Label = labelPairs + return e, nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 0000000000..d53848dc48 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,484 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. +type metricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +// Without explicit forwarding of Describe, Collect, Reset, those methods won't +// show up in GoDoc. + +// Describe implements Collector. +func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } + +// Collect implements Collector. +func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } + +// Reset deletes all metrics in this vector. 
+func (m *metricVec) Reset() { m.metricMap.Reset() } + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. + } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. 
+func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. 
+func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 0000000000..438aa5e924 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,212 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. Wrapping a nil value is valid, resulting +// in a no-op Registerer. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// Wrapping a nil value is valid, resulting in a no-op Registerer. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
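+//
+// A minimal usage sketch (the prefix, the collector name tasksTotal, and the
+// choice of DefaultRegisterer as the wrapped registry are illustrative
+// assumptions):
+//
+//   workerReg := WrapRegistererWithPrefix("worker_", DefaultRegisterer)
+//   workerReg.MustRegister(tasksTotal) // now exposed under a "worker_"-prefixed metric name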
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + if r.wrappedRegisterer == nil { + return nil + } + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + if r.wrappedRegisterer == nil { + return + } + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + if r.wrappedRegisterer == nil { + return false + } + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m *wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. 
+ if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 0000000000..20110e410e --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 0000000000..2f4930d9dd --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,723 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metrics.proto + +package io_prometheus_client + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{1} +} + +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + 
+type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{2} +} + +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{3} +} + +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{4} +} + +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m 
*Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{5} +} + +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{6} +} + +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) 
GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{7} +} + +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{8} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` 
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{9} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{10} +} + +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() 
string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } + +var fileDescriptor_6039342a2ba47b72 = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, + 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, + 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, + 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, + 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, + 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, + 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, + 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, + 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, + 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, + 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, + 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, + 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, + 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, + 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, + 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, + 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, + 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, + 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, + 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, + 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, + 
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, + 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, + 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, + 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, + 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, + 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, + 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, + 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, + 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, + 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, + 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, + 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, + 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, + 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, + 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, + 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, + 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, + 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, + 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 0000000000..636a2c1a5e --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 0000000000..c092723e84 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. +func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. 
+ fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurrs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occurred. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) + } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range 
f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". + lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: 
timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 0000000000..bd4e347454 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,162 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +// Closer is implemented by Encoders that need to be closed to finalize +// encoding. (For example, OpenMetrics needs a final `# EOF` line.) +// +// Note that all Encoder implementations returned from this package implement +// Closer, too, even if the Close call is a no-op. This happens in preparation +// for adding a Close method to the Encoder interface directly in a (mildly +// breaking) release in the future. +type Closer interface { + Close() error +} + +type encoderCloser struct { + encode func(*dto.MetricFamily) error + close func() error +} + +func (ec encoderCloser) Encode(v *dto.MetricFamily) error { + return ec.encode(v) +} + +func (ec encoderCloser) Close() error { + return ec.close() +} + +// Negotiate returns the Content-Type based on the given Accept header. If no +// appropriate accepted type is found, FmtText is returned (which is the +// Prometheus text format). This function will never negotiate FmtOpenMetrics, +// as the support is still experimental. To include the option to negotiate +// FmtOpenMetrics, use NegotiateOpenMetrics. 
+func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NegotiateIncludingOpenMetrics works like Negotiate but includes +// FmtOpenMetrics as an option for the result. Note that this function is +// temporary and will disappear once FmtOpenMetrics is fully supported and as +// such may be negotiated by the normal Negotiate function. +func NegotiateIncludingOpenMetrics(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { + return FmtOpenMetrics + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. All +// Encoder implementations returned by NewEncoder also implement Closer, and +// callers should always call the Close method. It is currently only required +// for FmtOpenMetrics, but a future (breaking) release will add the Close method +// to the Encoder interface directly. The current version of the Encoder +// interface is kept for backwards compatibility. +func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtProtoCompact: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }, + close: func() error { return nil }, + } + case FmtProtoText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }, + close: func() error { return nil }, + } + case FmtText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtOpenMetrics: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToOpenMetrics(w, v) + return err + }, + close: func() error { + _, err := FinalizeOpenMetrics(w) + return err + }, + } + } + panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 0000000000..0f176fa64f --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,41 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion = "0.0.1" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 0000000000..dc2eedeefc --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go new file mode 100644 index 0000000000..8a9313a3be --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -0,0 +1,527 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the +// OpenMetrics text format and writes the resulting lines to 'out'. It returns +// the number of bytes written and any error encountered. The output will have +// the same order as the input, no further sorting is performed. Furthermore, +// this function assumes the input is already sanitized and does not perform any +// sanity checks. If the input contains duplicate metrics or invalid metric or +// label names, the conversion will result in invalid text format output. +// +// This function fulfills the type 'expfmt.encoder'. +// +// Note that OpenMetrics requires a final `# EOF` line. Since this function acts +// on individual metric families, it is the responsibility of the caller to +// append this line to 'out' once all metric families have been written. +// Conveniently, this can be done by calling FinalizeOpenMetrics. +// +// The output should be fully OpenMetrics compliant. However, there are a few +// missing features and peculiarities to avoid complications when switching from +// Prometheus to OpenMetrics or vice versa: +// +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. +// +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. +// +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var ( + n int + metricType = in.GetType() + shortName = name + ) + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { + shortName = name[:len(name)-6] + } + + // Comments, first HELP, then TYPE. 
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + switch metricType { + case dto.MetricType_COUNTER: + if strings.HasSuffix(name, "_total") { + n, err = w.WriteString(" counter\n") + } else { + n, err = w.WriteString(" unknown\n") + } + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" unknown\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + // Note that we have ensured above that either the name + // ends on `_total` or that the rendered type is + // `unknown`. Therefore, no `_total` must be added here. + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), 0, false, + metric.Counter.Exemplar, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), 0, false, + nil, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), 0, false, + nil, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeOpenMetricsSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Summary.GetSampleCount(), true, + nil, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + 0, b.GetCumulativeCount(), true, + b.Exemplar, + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), 
+ 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. +func FinalizeOpenMetrics(w io.Writer) (written int, err error) { + return w.Write([]byte("# EOF\n")) +} + +// writeOpenMetricsSample writes a single sample in OpenMetrics text format to +// w, given the metric name, the metric proto message itself, optionally an +// additional label name with a float64 value (use empty string as label name if +// not required), the value (optionally as float64 or uint64, determined by +// useIntValue), and optionally an exemplar (use nil if not required). The +// function returns the number of bytes written and any error encountered. +func writeOpenMetricsSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + floatValue float64, intValue uint64, useIntValue bool, + exemplar *dto.Exemplar, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeOpenMetricsLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + if useIntValue { + n, err = writeUint(w, intValue) + } else { + n, err = writeOpenMetricsFloat(w, floatValue) + } + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly without converting to a float first. + n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) + written += n + if err != nil { + return written, err + } + } + if exemplar != nil { + n, err = writeExemplar(w, exemplar) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float +// in OpenMetrics style. 
+func writeOpenMetricsLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeExemplar writes the provided exemplar in OpenMetrics format to w. The +// function returns the number of bytes written and any error encountered. +func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { + written := 0 + n, err := w.WriteString(" # ") + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, e.GetValue()) + written += n + if err != nil { + return written, err + } + if e.Timestamp != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + ts, err := ptypes.Timestamp((*e).Timestamp) + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting +// number would otherwise contain neither a "." nor an "e". +func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return w.WriteString("1.0") + case f == 0: + return w.WriteString("0.0") + case f == -1: + return w.WriteString("-1.0") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeUint is like writeInt just for uint64. 
+func writeUint(w enhancedWriter, u uint64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendUint((*bp)[:0], u, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 0000000000..5ba503b065 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,465 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "math" + "strconv" + "strings" + "sync" + + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriter(ioutil.Discard) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { + // Fail-fast checks. + if len(in.Metric) == 0 { + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + + // Comments, first HELP, then TYPE. 
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Summary.GetSampleCount()), + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + float64(metric.Histogram.GetSampleCount()), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Histogram.GetSampleCount()), + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + 
written += n + if err != nil { + return + } + } + return +} + +// writeSample writes a single sample in text format to w, given the metric +// name, the metric proto message itself, optionally an additional label name +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. +func writeSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + value float64, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. 
+var ( + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { + if includeDoubleQuote { + return quotedEscaper.WriteString(w, v) + } + return escaper.WriteString(w, v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 0000000000..342e5940d0 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,764 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. 
+ lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) 
+ if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. 
+ if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := parseFloat(p.currentToken.String()) + if err != nil { + // Create a more helpful error message. 
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. 
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} + +func parseFloat(s string) (float64, error) { + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 0000000000..7723656d58 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 0000000000..26e92288c7 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 0000000000..35e739c7ad --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate checks whether the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 0000000000..fc4de4106e --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 0000000000..038fc1c900 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000000..41051a01a3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is a valid UTF8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 0000000000..6eda08a739 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. 
+func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 0000000000..00804b7fed --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,102 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 0000000000..a7b9691707 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 0000000000..8762b13c63 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. +func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 0000000000..bb99889d2c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes a matcher that matches the value of a given label. +type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate returns nil iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definition in the Prometheus +// eco-system. +type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate returns nil iff all fields of the silence have valid values. 
+func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 0000000000..490a0240c1 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,274 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. + second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes an interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. 
+func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. + if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. +func ParseDuration(durationStr string) (Duration, error) { + // Allow 0 without a unit. 
+ if durationStr == "0" { + return 0, nil + } + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 0000000000..c9d8fb1a28 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. 
+ ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore new file mode 100644 index 0000000000..25e3659ab2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -0,0 +1 @@ +/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml new file mode 100644 index 0000000000..0aa09edacb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -0,0 +1,4 @@ +--- +linters: + enable: + - golint diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9a1aff4127 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
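For orientation, the snippet below is a minimal, illustrative sketch of how the label-set, fingerprint, and duration APIs vendored above are typically consumed. The sample program, its label values, and the "5m" duration are assumptions made for illustration only; they are not part of the vendored sources.

```go
// Illustrative sketch only; not part of the vendored code.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Build a label set and validate its names and values before use.
	ls := model.LabelSet{
		model.MetricNameLabel: "http_requests_total",
		model.JobLabel:        "api-server",
		model.InstanceLabel:   "10.0.0.1:9090",
	}
	if err := ls.Validate(); err != nil {
		fmt.Println("invalid label set:", err)
		return
	}

	// Fingerprints provide a stable identity for a label set.
	fmt.Println("fingerprint:", ls.Fingerprint())

	// Durations use the Prometheus duration format, e.g. "30s", "5m", "1h".
	d, err := model.ParseDuration("5m")
	if err != nil {
		fmt.Println("bad duration:", err)
		return
	}
	fmt.Println("duration:", d)
}
```

The metric and sample types in the same package (`model.Metric`, `model.Sample`, `model.Vector`) follow the same validate-then-use pattern.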
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 0000000000..943de7615e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,121 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) a suitable maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). + +* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) + +## Steps to Contribute + +Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. + +Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). + +For quickly compiling and testing your changes do: +``` +make test # Make sure all the tests pass before you commit and push :) +``` + +We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. + +## Pull Request Checklist + +* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. + +* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). + +* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). + +* Add tests relevant to the fixed bug or new feature. 
+ +## Dependency management + +The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. + +All dependencies are vendored in the `vendor/` directory. + +To add or update a new dependency, use the `go get` command: + +```bash +# Pick the latest tagged release. +go get example.com/some/module/pkg + +# Pick a specific version. +go get example.com/some/module/pkg@vX.Y.Z +``` + +Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: + + +```bash +# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. +GO111MODULE=on go mod tidy + +GO111MODULE=on go mod vendor +``` + +You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. + + +## API Implementation Guidelines + +### Naming and Documentation + +Public functions and structs should normally be named according to the file(s) being read and parsed. For example, +the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function +should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). + +### Reading vs. Parsing + +Most functionality in this library consists of reading files and then parsing the text into structured data. In most +cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and +a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested +directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types +such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files +in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. + +### /proc and /sys filesystem I/O + +The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. +Many of the files are changing continuously and the data being read can in some cases change between subsequent +reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls +to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the +full file in a single operation using an internal utility function called `util.ReadFileNoStat`. +This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +the file. + +Note that parsing the file's contents can still be performed one line at a time. This is done by first reading +the full file, and then using a scanner on the `[]byte` or `string` containing the data. + +``` + data, err := util.ReadFileNoStat("/proc/cpuinfo") + if err != nil { + return err + } + reader := bytes.NewReader(data) + scanner := bufio.NewScanner(reader) +``` + +The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files +can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +not bother to check the size of the file before reading. 
+``` + data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") +``` + diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 0000000000..56ba67d3e3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1,2 @@ +* Johannes 'fish' Ziemke @discordianfish +* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 0000000000..616a0d25eb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,29 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +include Makefile.common + +%/.unpacked: %.ttar + @echo ">> extracting fixtures" + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +update_fixtures: + rm -vf fixtures/.unpacked + ./ttar -c -f fixtures.ttar fixtures/ + +.PHONY: build +build: + +.PHONY: test +test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 0000000000..9320176ca2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,300 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. 
+# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +GOVENDOR := +GO111MODULE := +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif +endif +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... + +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell which gotestsum),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.5.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.18.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . 
-path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... +endif + +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get $$m; \ + done + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifneq (,$(wildcard vendor)) + GO111MODULE=$(GO111MODULE) $(GO) mod vendor +endif + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" +ifdef GO111MODULE +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. + GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +else + $(GOLANGCI_LINT) run $(pkgs) +endif +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifeq (,$(wildcard vendor)) + @git diff --exit-code -- go.sum go.mod +else + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 0000000000..53c5e9aa11 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. 
(http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 0000000000..55d1e3261c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,61 @@ +# procfs + +This package provides functions to retrieve system, kernel, and process +metrics from the pseudo-filesystems /proc and /sys. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, +/sys, or both. For example, cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Package Organization + +The packages in this project are organized according to (1) whether the data comes from the `/proc` or +`/sys` filesystem and (2) the type of information being retrieved. For example, most process information +can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives +is available in the `blockdevice` sub-package. + +## Building and Testing + +The procfs library is intended to be built as part of another application, so there are no distributable binaries. +However, most of the API includes unit tests which can be run with `make test`. + +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. And finally, verify the changes using +`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 0000000000..916c9182a8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "net" + "strings" +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. +func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + ip := net.ParseIP(columns[0]) + mac := net.HardwareAddr(columns[3]) + + entry := ARPEntry{ + IPAddr: ip, + HWAddr: mac, + Device: columns[5], + } + + return entry, nil +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 0000000000..10bd067a0a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,85 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 0000000000..b9fb589aa1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,464 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +var ( + cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) + cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) +) + +// CPUInfo returns information about current system CPUs. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor", "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu family": + cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + if !match || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := 
strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + featuresLine := "" + commonCPUInfo := CPUInfo{} + i := 0 + if strings.TrimSpace(field[0]) == "Processor" { + commonCPUInfo = CPUInfo{ModelName: field[1]} + i = -1 + } else { + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo = []CPUInfo{firstcpu} + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "BogoMIPS": + if i == -1 { + cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor + i++ + cpuinfo[i].Processor = 0 + } + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "Features": + featuresLine = line + case "model name": + cpuinfo[i].ModelName = field[1] + } + } + fields := strings.SplitN(featuresLine, ": ", 2) + for i := range cpuinfo { + cpuinfo[i].Flags = strings.Fields(fields[1]) + } + return cpuinfo, nil + +} + +func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + commonCPUInfo := CPUInfo{VendorID: field[1]} + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "bogomips per cpu": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + commonCPUInfo.BogoMips = v + case "features": + commonCPUInfo.Flags = strings.Fields(field[1]) + } + if strings.HasPrefix(line, "processor") { + match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) + if len(match) < 2 { + return nil, errors.New("Invalid line found in cpuinfo: " + line) + } + cpu := commonCPUInfo + v, err := strconv.ParseUint(match[1], 0, 32) + if err != nil { + return nil, err + } + cpu.Processor = uint(v) + cpuinfo = append(cpuinfo, cpu) + } + if strings.HasPrefix(line, "cpu number") { + break + } + } + + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "cpu number": + i++ + case "cpu MHz dynamic": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + + return cpuinfo, nil +} + +func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if 
!strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "cpu model": + cpuinfo[i].ModelName = field[1] + case "BogoMIPS": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "cpu": + cpuinfo[i].VendorID = field[1] + case "clock": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + case "hart": + cpuinfo[i].CoreID = field[1] + case "isa": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode + return nil, errors.New("not implemented") +} + +// firstNonEmptyLine advances the scanner to the first non-empty line +// and returns the contents of that line +func firstNonEmptyLine(scanner *bufio.Scanner) string { + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) != "" { + return line + } + } + return "" +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go new file mode 100644 index 0000000000..44b590ed38 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -0,0 +1,19 @@ +// Copyright 
2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build arm arm64 + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go new file mode 100644 index 0000000000..91e272573a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build mips mipsle mips64 mips64le + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go new file mode 100644 index 0000000000..95b5b4ec44 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x + +package procfs + +var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go new file mode 100644 index 0000000000..6068bd571c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build ppc64 ppc64le + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go new file mode 100644 index 0000000000..26814eebaa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go new file mode 100644 index 0000000000..d5bedf97f3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build 386 amd64 + +package procfs + +var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 0000000000..a958933757 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. 
+type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, fmt.Errorf("error reading crypto %s: %s", path, err) + } + + crypto, err := parseCrypto(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %s: %s", path, err) + } + + return crypto, nil +} + +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. + out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("malformed crypto line: %q", text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. + c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. + c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 0000000000..e2acd6d40a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar new file mode 100644 index 0000000000..12494d7424 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -0,0 +1,6185 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ +Directory: fixtures +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/cmdline +Lines: 1 +vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/comm +Lines: 1 +vim +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/cwd +SymlinkTo: /usr/bin +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/environ +Lines: 1 +PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/exe +SymlinkTo: /usr/bin/vim +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/10 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/fdinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/0 +Lines: 6 +pos: 0 +flags: 02004000 +mnt_id: 13 +inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 
fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 +inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a +inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/1 +Lines: 4 +pos: 0 +flags: 02004002 +mnt_id: 13 +eventfd-count: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/10 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/2 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/3 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/io +Lines: 7 +rchar: 750339 +wchar: 818609 +syscr: 7405 +syscw: 5245 +read_bytes: 1024 +write_bytes: 2048 +cancelled_write_bytes: -1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 65536 65536 bytes +Max address space 8589934592 unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/mountstats +Lines: 20 +device rootfs mounted on / with fstype rootfs +device sysfs mounted on /sys with fstype sysfs +device proc mounted on /proc with fstype proc +device /dev/sda1 mounted on / with fstype ext4 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none + age: 13968 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/proc/26231/net/dev +Lines: 4 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/ns +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/ns/mnt +SymlinkTo: mnt:[4026531840] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/ns/net +SymlinkTo: net:[4026531993] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/root +SymlinkTo: / +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/schedstat +Lines: 1 +411605849 93680043 79 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps +Lines: 252 +00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager +Size: 8900 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2952 kB +Pss: 2952 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 2952 kB +Private_Dirty: 0 kB +Referenced: 2864 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me dw sd +00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager +Size: 10236 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 6152 kB +Pss: 6152 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 6152 kB +Private_Dirty: 0 kB +Referenced: 5308 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr mw me dw sd +016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager +Size: 424 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 176 kB +Pss: 176 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 84 kB +Private_Dirty: 92 kB +Referenced: 176 kB +Anonymous: 92 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 12 kB +SwapPss: 12 kB +Locked: 0 kB +VmFlags: rd wr mr mw me dw ac sd +0171a000-0173f000 rw-p 00000000 00:00 0 +Size: 148 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 76 kB +Pss: 76 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 76 kB +Referenced: 76 kB +Anonymous: 76 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000000000-c000400000 rw-p 00000000 00:00 0 +Size: 4096 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2564 kB +Pss: 2564 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 20 kB +Private_Dirty: 2544 kB +Referenced: 2544 kB +Anonymous: 2564 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1100 kB +SwapPss: 1100 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000400000-c001600000 rw-p 00000000 00:00 0 +Size: 18432 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 16024 kB +Pss: 16024 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 5864 kB +Private_Dirty: 10160 kB +Referenced: 11944 kB +Anonymous: 
16024 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 440 kB +SwapPss: 440 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd nh +c001600000-c004000000 rw-p 00000000 00:00 0 +Size: 43008 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 +Size: 38596 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 1992 kB +Pss: 1992 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 476 kB +Private_Dirty: 1516 kB +Referenced: 1828 kB +Anonymous: 1992 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 384 kB +SwapPss: 384 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] +Size: 132 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 8 kB +Pss: 8 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 8 kB +Referenced: 8 kB +Anonymous: 8 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 4 kB +SwapPss: 4 kB +Locked: 0 kB +VmFlags: rd wr mr mw me gd ac +7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] +Size: 12 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr pf io de dd sd +7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] +Size: 8 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 4 kB +Pss: 0 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 4 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me de sd +ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] +Size: 4 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps_rollup +Lines: 17 +00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] +Rss: 29948 kB +Pss: 29944 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 15548 kB +Private_Dirty: 14396 kB +Referenced: 24752 kB +Anonymous: 20756 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1940 kB +SwapPss: 1940 kB +Locked: 0 kB +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/stat +Lines: 1 +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 
140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/status +Lines: 53 + +Name: prometheus +Umask: 0022 +State: S (sleeping) +Tgid: 26231 +Ngid: 0 +Pid: 26231 +PPid: 1 +TracerPid: 0 +Uid: 1000 1000 1000 0 +Gid: 1001 1001 1001 0 +FDSize: 128 +Groups: +NStgid: 1 +NSpid: 1 +NSpgid: 1 +NSsid: 1 +VmPeak: 58472 kB +VmSize: 58440 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 8028 kB +VmRSS: 6716 kB +RssAnon: 2092 kB +RssFile: 4624 kB +RssShmem: 0 kB +VmData: 2580 kB +VmStk: 136 kB +VmExe: 948 kB +VmLib: 6816 kB +VmPTE: 128 kB +VmPMD: 12 kB +VmSwap: 660 kB +HugetlbPages: 0 kB +Threads: 1 +SigQ: 8/63965 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 7be3c0fe28014a03 +SigIgn: 0000000000001000 +SigCgt: 00000001800004ec +CapInh: 0000000000000000 +CapPrm: 0000003fffffffff +CapEff: 0000003fffffffff +CapBnd: 0000003fffffffff +CapAmb: 0000000000000000 +Seccomp: 0 +Cpus_allowed: ff +Cpus_allowed_list: 0-7 +Mems_allowed: 00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 4742839 +nonvoluntary_ctxt_switches: 1727500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/wchan +Lines: 1 +poll_schedule_timeoutEOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/cmdline +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/comm +Lines: 1 +ata_sff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/cwd +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/4 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice 
priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/maps +Lines: 9 +55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat +55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat +55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] +7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive +7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so +7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] +7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] +7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/root +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/stat +Lines: 1 +33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/wchan +Lines: 1 +0EOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26233 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26233/cmdline +Lines: 1 +com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26233/schedstat +Lines: 8 + ____________________________________ +< this is a malformed schedstat file > + ------------------------------------ + \ ^__^ + \ (oo)\_______ + (__)\ )\/\ + ||----w | + || || +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26234 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26234/maps +Lines: 4 +08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh +08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh +0808c000-08146000 rwxp 00000000 00:00 0 +40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/584 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 
17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat /proc/self/stat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/cpuinfo +Lines: 216 +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.998 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.037 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.010 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 22 
+wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.028 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.989 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size 
: 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.083 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.017 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.030 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 
3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/crypto +Lines: 972 +name : ccm(aes) +driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) +module : ccm +priority : 300 +refcnt : 4 +selftest : passed +internal : no +type : aead +async : no +blocksize : 1 +ivsize : 16 +maxauthsize : 16 +geniv : + +name : cbcmac(aes) +driver : cbcmac(aes-aesni) +module : ccm +priority : 300 +refcnt : 7 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 16 + +name : ecdh +driver : ecdh-generic +module : ecdh_generic +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp +async : yes + +name : ecb(arc4) +driver : ecb(arc4)-generic +module : arc4 +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 1 +max keysize : 256 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : arc4 +driver : arc4-generic +module : arc4 +priority : 0 +refcnt : 3 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 1 +max keysize : 256 + +name : crct10dif +driver : crct10dif-pclmul +module : crct10dif_pclmul +priority : 200 +refcnt : 2 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32 +driver : crc32-pclmul +module : crc32_pclmul +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : __ghash +driver : cryptd(__ghash-pclmulqdqni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : ghash +driver : ghash-clmulni +module : ghash_clmulni_intel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : __ghash +driver : __ghash-pclmulqdqni +module : ghash_clmulni_intel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : shash +blocksize : 16 +digestsize : 16 + +name : crc32c +driver : crc32c-intel +module : crc32c_intel +priority : 200 +refcnt : 5 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : cbc(aes) +driver : cbc(aes-aesni) +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr(aes-aesni) +module : kernel +priority : 300 +refcnt : 5 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : pkcs1pad(rsa,sha256) +driver : pkcs1pad(rsa-generic,sha256) +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : __xts(aes) +driver : cryptd(__xts-aes-aesni) +module : kernel +priority : 451 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : 
yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : xts(aes) +driver : xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : cryptd(__ctr-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : cryptd(__cbc-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : cbc(aes) +driver : cbc-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ecb(aes) +driver : cryptd(__ecb-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : ecb(aes) +driver : ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __generic-gcm-aes-aesni +driver : cryptd(__driver-generic-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : gcm(aes) +driver : generic-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __generic-gcm-aes-aesni +driver : __driver-generic-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : cryptd(__driver-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : rfc4106(gcm(aes)) +driver : rfc4106-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : __driver-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __xts(aes) +driver : __xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : __ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 
+selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : __cbc-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ecb(aes) +driver : __ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __aes +driver : __aes-aesni +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : yes +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : aes +driver : aes-aesni +module : kernel +priority : 300 +refcnt : 8 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : hmac(sha1) +driver : hmac(sha1-generic) +module : kernel +priority : 100 +refcnt : 9 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : ghash +driver : ghash-generic +module : kernel +priority : 100 +refcnt : 3 +selftest : passed +internal : no +type : shash +blocksize : 16 +digestsize : 16 + +name : jitterentropy_rng +driver : jitterentropy_rng +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha256 +module : kernel +priority : 221 +refcnt : 2 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha512 +module : kernel +priority : 220 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha384 +module : kernel +priority : 219 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha1 +module : kernel +priority : 218 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha256 +module : kernel +priority : 217 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha512 +module : kernel +priority : 216 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha384 +module : kernel +priority : 215 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha1 +module : kernel +priority : 214 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes256 +module : kernel +priority : 213 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes192 +module : kernel +priority : 212 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes128 +module : kernel +priority : 211 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : hmac(sha256) +driver : hmac(sha256-generic) +module : kernel +priority : 100 +refcnt : 10 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : stdrng +driver : drbg_pr_hmac_sha256 +module : kernel +priority : 210 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver 
: drbg_pr_hmac_sha512 +module : kernel +priority : 209 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha384 +module : kernel +priority : 208 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha1 +module : kernel +priority : 207 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha256 +module : kernel +priority : 206 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha512 +module : kernel +priority : 205 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha384 +module : kernel +priority : 204 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha1 +module : kernel +priority : 203 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes256 +module : kernel +priority : 202 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes192 +module : kernel +priority : 201 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes128 +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : 842 +driver : 842-scomp +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : 842 +driver : 842-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo-rle +driver : lzo-rle-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo-rle +driver : lzo-rle-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo +driver : lzo-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo +driver : lzo-generic +module : kernel +priority : 0 +refcnt : 9 +selftest : passed +internal : no +type : compression + +name : crct10dif +driver : crct10dif-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32c +driver : crc32c-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : zlib-deflate +driver : zlib-deflate-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : aes +driver : aes-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : sha224 +driver : sha224-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 28 + +name : sha256 +driver : sha256-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : sha1 
+driver : sha1-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : md5 +driver : md5-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 16 + +name : ecb(cipher_null) +driver : ecb-cipher_null +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 0 +max keysize : 0 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : digest_null +driver : digest_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 0 + +name : compress_null +driver : compress_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : cipher_null +driver : cipher_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 0 +max keysize : 0 + +name : rsa +driver : rsa-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : dh +driver : dh-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp + +name : aes +driver : aes-asm +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/diskstats +Lines: 52 + 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 + 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 + 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 + 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 + 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 + 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 + 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 + 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 + 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 + 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 + 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 + 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 + 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 + 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 + 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 + 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 + 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 + 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 + 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 + 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 + 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 + 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 + 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 + 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 + 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 + 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 + 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 + 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 + 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 + 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 + 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 + 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 + 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 + 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 + 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 + 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 + 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 + 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 + 254 2 vda2 1774936 15266 32663262 
8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 + 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 + 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 + 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 + 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 + 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 + 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 + 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 + 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 + 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 + 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs/fscache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/fs/fscache/stats +Lines: 24 +FS-Cache statistics +Cookies: idx=3 dat=67877 spc=0 +Objects: alc=67473 nal=0 avl=67473 ded=388 +ChkAux : non=12 ok=33 upd=44 obs=55 +Pages : mrk=547164 unc=364577 +Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 +Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 +Invals : n=14 run=13 +Updates: n=7 nul=3 run=8 +Relinqs: n=394 nul=1 wcr=2 rtr=3 +AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 +Allocs : n=20 ok=19 wt=18 nbf=17 int=16 +Allocs : ops=15 owt=14 abt=13 +Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 +Retrvls: ops=151959 owt=42747 abt=44 +Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 +Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 +VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 +Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 +Ops : ini=377538 dfr=27 rel=377538 gc=37 +CacheOp: alo=1 luo=2 luc=3 gro=4 +CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 +CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 +CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/fs/xfs/stat +Lines: 23 +extent_alloc 92447 97589 92448 93751 +abt 0 0 0 0 +blk_map 1767055 188820 184891 92447 92448 2140766 0 +bmbt 0 0 0 0 +dir 185039 92447 92444 136422 +trans 706 944304 0 +ig 185045 58807 0 126238 0 33637 22 +log 2883 113448 9 17360 739 +push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 +xstrat 92447 0 +rw 107739 94045 +attr 4 0 0 0 +icluster 8677 7849 135802 +vnodes 92601 0 0 0 92444 92444 92444 0 +buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 +abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 +abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 +bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 +fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +qm 0 0 0 0 0 0 0 0 +xpc 399724544 92823103 86219234 +debug 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/loadavg +Lines: 1 +0.02 0.04 0.05 1/497 11947 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/mdstat +Lines: 60 +Personalities : [linear] [multipath] [raid0] [raid1] 
[raid6] [raid5] [raid4] [raid10] + +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) + 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] + +md127 : active raid1 sdi2[0] sdj2[1] + 312319552 blocks [2/2] [UU] + +md0 : active raid1 sdi1[0] sdj1[1] + 248896 blocks [2/2] [UU] + +md4 : inactive raid1 sda3[0](F) sdb3[1](S) + 4883648 blocks [2/2] [UU] + +md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] + 195310144 blocks [2/1] [U_] + [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) + 195310144 blocks [2/2] [UU] + [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md201 : active raid1 sda3[0] sdb3[1] + 1993728 blocks super 1.2 [2/2] [UU] + [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + +md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) + 523968 blocks super 1.2 [4/4] [UUUU] + resync=DELAYED + +md10 : active raid0 sda1[0] sdb1[1] + 314159265 blocks 64k chunks + +md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) + 4190208 blocks super 1.2 [2/2] [UU] + resync=PENDING + +md12 : active raid0 sdc2[0] sdd2[1] + 3886394368 blocks super 1.2 512k chunks + +md126 : active raid0 sdb[1] sdc[0] + 1855870976 blocks super external:/md127/0 128k chunks + +md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) + 7932 blocks super external:imsm + +md00 : active raid0 xvdb[0] + 4186624 blocks super 1.2 256k chunks + +md120 : active linear sda1[1] sdb1[0] + 2095104 blocks super 1.2 0k rounding + +md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] + 322560 blocks super 1.2 512k chunks + +unused devices: +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/meminfo +Lines: 42 +MemTotal: 15666184 kB +MemFree: 440324 kB +Buffers: 1020128 kB +Cached: 12007640 kB +SwapCached: 0 kB +Active: 6761276 kB +Inactive: 6532708 kB +Active(anon): 267256 kB +Inactive(anon): 268 kB +Active(file): 6494020 kB +Inactive(file): 6532440 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 0 kB +SwapFree: 0 kB +Dirty: 768 kB +Writeback: 0 kB +AnonPages: 266216 kB +Mapped: 44204 kB +Shmem: 1308 kB +Slab: 1807264 kB +SReclaimable: 1738124 kB +SUnreclaim: 69140 kB +KernelStack: 1616 kB +PageTables: 5288 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 7833092 kB +Committed_AS: 530844 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 36596 kB +VmallocChunk: 34359637840 kB +HardwareCorrupted: 0 kB +AnonHugePages: 12288 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 91136 kB +DirectMap2M: 16039936 kB +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/arp +Lines: 2 +IP address HW type Flags HW address Mask Device +192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/dev +Lines: 6 +Inter-| Receive | Transmit + face 
|bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 +docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/ip_vs +Lines: 21 +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP C0A80016:0CEA wlc + -> C0A85216:0CEA Tunnel 100 248 2 + -> C0A85318:0CEA Tunnel 100 248 2 + -> C0A85315:0CEA Tunnel 100 248 1 +TCP C0A80039:0CEA wlc + -> C0A85416:0CEA Tunnel 0 0 0 + -> C0A85215:0CEA Tunnel 100 1499 0 + -> C0A83215:0CEA Tunnel 100 1498 0 +TCP C0A80037:0CEA wlc + -> C0A8321A:0CEA Tunnel 0 0 0 + -> C0A83120:0CEA Tunnel 100 0 0 +TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh + -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 +FWM 10001000 wlc + -> C0A8321A:0CEA Route 0 0 1 + -> C0A83215:0CEA Route 0 0 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/ip_vs_stats +Lines: 6 + Total Incoming Outgoing Incoming Outgoing + Conns Packets Packets Bytes Bytes + 16AA370 E33656E5 0 51D8C8883AB3 0 + + Conns/s Pkts/s Pkts/s Bytes/s Bytes/s + 4 1FB3C 0 1282A8F 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/net/rpc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/rpc/nfs +Lines: 5 +net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/rpc/nfsd +Lines: 11 +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/sockstat +Lines: 6 +sockets: used 1602 +TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 +UDP: inuse 12 mem 62 +UDPLITE: inuse 0 +RAW: inuse 0 +FRAG: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/sockstat6 +Lines: 5 +TCP6: inuse 17 +UDP6: inuse 9 +UDPLITE6: inuse 0 +RAW6: inuse 1 +FRAG6: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat +Lines: 2 +00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 
00000000 00000000 +01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat.broken +Lines: 1 +00015c73 00020e76 F0000769 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp +Lines: 4 + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp6 +Lines: 3 + sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops + 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 + 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp_broken +Lines: 2 + sl local_address rem_address st + 1: 00000000:0016 00000000:0000 0A +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/unix +Lines: 6 +Num RefCount Protocol Flags Type St Inode Path +0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 5091797 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/unix_without_inode +Lines: 6 +Num RefCount Protocol Flags Type St Path +0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/xfrm_stat +Lines: 28 +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 +XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 4 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 4 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 +XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 +Mode: 
644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/pressure +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/cpu +Lines: 1 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/io +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/memory +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/schedstat +Lines: 6 +version 15 +timestamp 15819019232 +cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 +domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 +cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 +domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/self +SymlinkTo: 26231 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/stat +Lines: 16 +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 1 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/swaps +Lines: 2 +Filename Type Size Used Priority +/dev/dm-2 partition 131068 176 -2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/symlinktargets +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/README +Lines: 2 +This directory contains some empty files that are the symlinks the files in the "fd" directory point to. +They are otherwise ignored by the tests +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/def +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/ghi +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel/random +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/entropy_avail +Lines: 1 +3943 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/poolsize +Lines: 1 +4096 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold +Lines: 1 +3072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/vm +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/admin_reserve_kbytes +Lines: 1 +8192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/block_dump +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/compact_unevictable_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: fixtures/proc/sys/vm/dirty_background_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_background_ratio +Lines: 1 +10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_expire_centisecs +Lines: 1 +3000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_ratio +Lines: 1 +20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_writeback_centisecs +Lines: 1 +500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirtytime_expire_seconds +Lines: 1 +43200 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/drop_caches +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/extfrag_threshold +Lines: 1 +500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/hugetlb_shm_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/laptop_mode +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/legacy_va_layout +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/lowmem_reserve_ratio +Lines: 1 +256 256 32 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/max_map_count +Lines: 1 +65530 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/memory_failure_early_kill +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/memory_failure_recovery +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_free_kbytes +Lines: 1 +67584 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_slab_ratio +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_unmapped_ratio +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/mmap_min_addr +Lines: 1 +65536 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_hugepages +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_overcommit_hugepages +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/numa_stat +Lines: 1 +1 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/numa_zonelist_order +Lines: 1 +Node +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/oom_dump_tasks +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/oom_kill_allocating_task +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_kbytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_memory +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_ratio +Lines: 1 +50 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/page-cluster +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/panic_on_oom +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/percpu_pagelist_fraction +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/stat_interval +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/swappiness +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/user_reserve_kbytes +Lines: 1 +131072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/vfs_cache_pressure +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_boost_factor +Lines: 1 +15000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_scale_factor +Lines: 1 +10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/zone_reclaim_mode +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/zoneinfo +Lines: 262 +Node 0, zone DMA + per-node stats + nr_inactive_anon 230981 + nr_active_anon 547580 + nr_inactive_file 316904 + nr_active_file 346282 + nr_unevictable 115467 + nr_slab_reclaimable 131220 + nr_slab_unreclaimable 47320 + nr_isolated_anon 0 + nr_isolated_file 0 + workingset_nodes 11627 + workingset_refault 466886 + workingset_activate 276925 + workingset_restore 84055 + workingset_nodereclaim 487 + nr_anon_pages 795576 + nr_mapped 215483 + nr_file_pages 761874 + nr_dirty 908 + nr_writeback 0 + nr_writeback_temp 0 + nr_shmem 224925 + nr_shmem_hugepages 0 + nr_shmem_pmdmapped 0 + nr_anon_transparent_hugepages 0 + nr_unstable 0 + nr_vmscan_write 12950 + nr_vmscan_immediate_reclaim 3033 + nr_dirtied 8007423 + nr_written 7752121 + nr_kernel_misc_reclaimable 0 + pages free 3952 + min 33 + low 41 + high 49 + spanned 4095 + present 3975 + managed 3956 + protection: (0, 2877, 7826, 7826, 7826) + nr_free_pages 3952 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + 
nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 1 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 1 + numa_other 0 + pagesets + cpu: 0 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 1 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 2 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 3 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 4 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 5 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 6 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 7 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + node_unreclaimable: 0 + start_pfn: 1 +Node 0, zone DMA32 + pages free 204252 + min 19510 + low 21059 + high 22608 + spanned 1044480 + present 759231 + managed 742806 + protection: (0, 0, 4949, 4949, 4949) + nr_free_pages 204252 + nr_zone_inactive_anon 118558 + nr_zone_active_anon 106598 + nr_zone_inactive_file 75475 + nr_zone_active_file 70293 + nr_zone_unevictable 66195 + nr_zone_write_pending 64 + nr_mlock 4 + nr_page_table_pages 1756 + nr_kernel_stack 2208 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 113952967 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 113952967 + numa_other 0 + pagesets + cpu: 0 + count: 345 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 1 + count: 356 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 2 + count: 325 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 3 + count: 346 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 4 + count: 321 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 5 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 6 + count: 373 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 7 + count: 339 + high: 378 + batch: 63 + vm stats threshold: 48 + node_unreclaimable: 0 + start_pfn: 4096 +Node 0, zone Normal + pages free 18553 + min 11176 + low 13842 + high 16508 + spanned 1308160 + present 1308160 + managed 1268711 + protection: (0, 0, 0, 0, 0) + nr_free_pages 18553 + nr_zone_inactive_anon 112423 + nr_zone_active_anon 440982 + nr_zone_inactive_file 241429 + nr_zone_active_file 275989 + nr_zone_unevictable 49272 + nr_zone_write_pending 844 + nr_mlock 154 + nr_page_table_pages 9750 + nr_kernel_stack 15136 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 162718019 + numa_miss 0 + numa_foreign 0 + numa_interleave 26812 + numa_local 162718019 + numa_other 0 + pagesets + cpu: 0 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 1 + count: 366 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 2 + count: 60 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 3 + count: 256 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 4 + count: 253 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 5 + count: 159 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 6 + count: 311 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 7 + count: 264 + high: 378 + batch: 63 + vm stats threshold: 56 + node_unreclaimable: 0 + start_pfn: 1048576 +Node 0, zone Movable + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Node 0, zone Device + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Mode: 444 +# ttar - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/dm-0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/dm-0/stat +Lines: 1 +6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/add_random +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/chunk_sectors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/dax +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_granularity +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_zeroes_data +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/fua +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/hw_sector_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll_delay +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_timeout +Lines: 1 +30000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue/iosched +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_max +Lines: 1 +16384 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async +Lines: 1 +250 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/block/sda/queue/iosched/low_latency +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/max_budget +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle +Lines: 1 +8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us +Lines: 1 +8000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/timeout_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iostats +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/logical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_discard_segments +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb +Lines: 1 +32767 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_integrity_segments +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_sectors_kb +Lines: 1 +1280 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segment_size +Lines: 1 +65536 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segments +Lines: 1 +168 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/minimum_io_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nomerges +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_requests +Lines: 1 +64 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_zones +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/optimal_io_size +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/physical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/read_ahead_kb +Lines: 1 +128 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rotational +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rq_affinity +Lines: 1 +1 +Mode: 644 +# ttar - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/scheduler +Lines: 1 +mq-deadline kyber [bfq] none +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/wbt_lat_usec +Lines: 1 +75000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_cache +Lines: 1 +write back +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_same_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/zoned +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/stat +Lines: 1 +9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo +Lines: 1 +30 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/fabric_name +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/node_name +Lines: 1 +0x2000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_id +Lines: 1 +0x000002 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_name +Lines: 1 +0x1000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_state +Lines: 1 +Online +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_type +Lines: 1 +Point-To-Point (direct nport connection) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/speed +Lines: 1 +16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames +Lines: 1 +0xffffffffffffffff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/error_frames +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts 
+Lines: 1 +0x13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count +Lines: 1 +0x2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count +Lines: 1 +0x8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count +Lines: 1 +0x9 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count +Lines: 1 +0x11 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count +Lines: 1 +0x10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/nos_count +Lines: 1 +0x12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames +Lines: 1 +0x3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_words +Lines: 1 +0x4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset +Lines: 1 +0x7 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames +Lines: 1 +0x5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_words +Lines: 1 +0x6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_classes +Lines: 1 +Class 3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_speeds +Lines: 1 +4 Gbit, 8 Gbit, 16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/symbolic_name +Lines: 1 +Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. 
OS:Linux +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/board_id +Lines: 1 +SM_1141000001000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver +Lines: 1 +2.31.5050 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Lines: 1 +MT4099 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Lines: 1 +2221223609 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Lines: 1 +87169372 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Lines: 1 +26509113295 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Lines: 1 +85734114 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +3599 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Lines: 1 +2460436784 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets +Lines: 1 +89332064 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data +Lines: 1 +26540356890 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets +Lines: 1 +88622850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait +Lines: 1 +3846 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net/eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/net/eth0/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/device +SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0 +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/name +Lines: 1 +package-0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw +Lines: 1 +0 +Mode: 644 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us +Lines: 1 +976 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj +Lines: 1 +118821284256 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/name +Lines: 1 +core +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/name +Lines: 1 +package-10 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/class/thermal +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/cur_state +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/max_state +Lines: 1 +50 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/type +Lines: 1 +Processor +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/cur_state +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/max_state +Lines: 1 +27 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/type +Lines: 1 +intel_powerclamp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/temp +Lines: 1 +49925 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/type +Lines: 1 +bcm2835_thermal +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/mode +Lines: 1 +enabled +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/passive +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/temp +Lines: 1 +-44000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/type +Lines: 1 +acpitz +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device +SymlinkTo: ../../../ACPI0003:00 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms +Lines: 1 +10598 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type +Lines: 1 +Mains +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 +Mode: 755 +# ttar - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm +Lines: 1 +2369000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity +Lines: 1 +98 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level +Lines: 1 +Normal +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold +Lines: 1 +95 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device +SymlinkTo: ../../../PNP0C0A:00 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full +Lines: 1 +50060000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design +Lines: 1 +47520000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now +Lines: 1 +49450000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer +Lines: 1 +LGC +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name +Lines: 1 +LNV-45N1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now +Lines: 1 +4830000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number +Lines: 1 +38109 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status +Lines: 1 +Discharging +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology +Lines: 1 +Li-ion +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type +Lines: 1 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=11750000 +POWER_SUPPLY_POWER_NOW=5064000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=47390000 +POWER_SUPPLY_ENERGY_NOW=40730000 +POWER_SUPPLY_CAPACITY=85 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design +Lines: 1 +10800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now +Lines: 1 +12229000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class +Lines: 1 +0x020000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device +Lines: 1 +0x15d7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq +Lines: 1 +140 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias +Lines: 1 +pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource +Lines: 13 +0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision +Lines: 1 +0x21 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device +Lines: 1 +0x225a +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor +Lines: 1 +0x17aa +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent +Lines: 6 +DRIVER=e1000e +PCI_CLASS=20000 +PCI_ID=8086:15D7 +PCI_SUBSYS_ID=17AA:225A +PCI_SLOT_NAME=0000:00:1f.6 +MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor +Lines: 1 +0x8086 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/name +Lines: 1 +demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/pool +Lines: 1 +iscsi-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/name +Lines: 1 +wrong +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/pool +Lines: 1 +wrong-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource/clocksource0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq +SymlinkTo: ../cpufreq/policy0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count +Lines: 1 +10084 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings +Lines: 1 +11 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list +Lines: 1 +0,4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq +Lines: 1 +1200195 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Lines: 1 +4294967295 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Lines: 1 +1 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count +Lines: 1 +523 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings +Lines: 1 +22 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list +Lines: 1 +1,5 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq +Lines: 1 +2400000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq +Lines: 1 +800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors +Lines: 1 +performance powersave +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq +Lines: 1 +1219917 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver +Lines: 1 +intel_pstate +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor +Lines: 1 +powersave +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq +Lines: 1 +2400000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug +Lines: 7 +rate: 1.1M/sec +dirty: 20.4G +target: 20.4G +proportional: 427.5k +integral: 790.0k +change: 321.5k/sec +next io: 17ms +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written 
+Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Lines: 1 +1305 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total 
+Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly +Lines: 1 +131072 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used +Lines: 1 +933888 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used +Lines: 1 +1867776 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes +Lines: 1 +1073741824 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes +Lines: 1 +933888 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes +Lines: 1 +1073741824 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used +Lines: 1 +32768 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes +Lines: 
1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label +Lines: 1 +fixture +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid +Lines: 1 +0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly +Lines: 1 +262144 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly +Lines: 1 +0 +Mode: 
444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 +SymlinkTo: ../../../../devices/virtual/block/loop22 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 +SymlinkTo: ../../../../devices/virtual/block/loop23 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 +SymlinkTo: ../../../../devices/virtual/block/loop24 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 +SymlinkTo: ../../../../devices/virtual/block/loop25 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid +Lines: 1 +7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sda1/stats/stats +Lines: 1 +extent_alloc 1 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sdb1/stats/stats +Lines: 1 +extent_alloc 2 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path +Lines: 1 +/home/iscsi/file_back_1G +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path +Lines: 1 +/dev/rbd1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path +Lines: 1 +/dev/rbd/iscsi-images/demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d +SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +204950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +40325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 +SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +104950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +20095 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +71235 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 +SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +301950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +30195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 +SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +1234 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +1504 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +4733 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 0000000000..0102ab0fd8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "github.com/prometheus/procfs/internal/fs" +) + +// FS represents the pseudo-filesystem sys, which provides an interface to +// kernel data structures. 
+type FS struct { + proc fs.FS +} + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint + +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. +func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) +} + +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) + if err != nil { + return FS{}, err + } + return FS{fs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go new file mode 100644 index 0000000000..8783cf3cc1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -0,0 +1,422 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Fscacheinfo represents fscache statistics. +type Fscacheinfo struct { + // Number of index cookies allocated + IndexCookiesAllocated uint64 + // data storage cookies allocated + DataStorageCookiesAllocated uint64 + // Number of special cookies allocated + SpecialCookiesAllocated uint64 + // Number of objects allocated + ObjectsAllocated uint64 + // Number of object allocation failures + ObjectAllocationsFailure uint64 + // Number of objects that reached the available state + ObjectsAvailable uint64 + // Number of objects that reached the dead state + ObjectsDead uint64 + // Number of objects that didn't have a coherency check + ObjectsWithoutCoherencyCheck uint64 + // Number of objects that passed a coherency check + ObjectsWithCoherencyCheck uint64 + // Number of objects that needed a coherency data update + ObjectsNeedCoherencyCheckUpdate uint64 + // Number of objects that were declared obsolete + ObjectsDeclaredObsolete uint64 + // Number of pages marked as being cached + PagesMarkedAsBeingCached uint64 + // Number of uncache page requests seen + UncachePagesRequestSeen uint64 + // Number of acquire cookie requests seen + AcquireCookiesRequestSeen uint64 + // Number of acq reqs given a NULL parent + AcquireRequestsWithNullParent uint64 + // Number of acq reqs rejected due to no cache available + AcquireRequestsRejectedNoCacheAvailable uint64 + // Number of acq reqs succeeded + AcquireRequestsSucceeded uint64 + // Number of acq reqs rejected due to error + AcquireRequestsRejectedDueToError uint64 + // Number of acq reqs failed on ENOMEM + AcquireRequestsFailedDueToEnomem uint64 + // Number of lookup calls made on cache backends + LookupsNumber uint64 + // Number of negative lookups made + LookupsNegative uint64 + // Number of positive lookups made + LookupsPositive uint64 + // Number of objects created by lookup + ObjectsCreatedByLookup 
uint64 + // Number of lookups timed out and requeued + LookupsTimedOutAndRequed uint64 + InvalidationsNumber uint64 + InvalidationsRunning uint64 + // Number of update cookie requests seen + UpdateCookieRequestSeen uint64 + // Number of upd reqs given a NULL parent + UpdateRequestsWithNullParent uint64 + // Number of upd reqs granted CPU time + UpdateRequestsRunning uint64 + // Number of relinquish cookie requests seen + RelinquishCookiesRequestSeen uint64 + // Number of rlq reqs given a NULL parent + RelinquishCookiesWithNullParent uint64 + // Number of rlq reqs waited on completion of creation + RelinquishRequestsWaitingCompleteCreation uint64 + // Relinqs rtr + RelinquishRetries uint64 + // Number of attribute changed requests seen + AttributeChangedRequestsSeen uint64 + // Number of attr changed requests queued + AttributeChangedRequestsQueued uint64 + // Number of attr changed rejected -ENOBUFS + AttributeChangedRejectDueToEnobufs uint64 + // Number of attr changed failed -ENOMEM + AttributeChangedFailedDueToEnomem uint64 + // Number of attr changed ops given CPU time + AttributeChangedOps uint64 + // Number of allocation requests seen + AllocationRequestsSeen uint64 + // Number of successful alloc reqs + AllocationOkRequests uint64 + // Number of alloc reqs that waited on lookup completion + AllocationWaitingOnLookup uint64 + // Number of alloc reqs rejected -ENOBUFS + AllocationsRejectedDueToEnobufs uint64 + // Number of alloc reqs aborted -ERESTARTSYS + AllocationsAbortedDueToErestartsys uint64 + // Number of alloc reqs submitted + AllocationOperationsSubmitted uint64 + // Number of alloc reqs waited for CPU time + AllocationsWaitedForCPU uint64 + // Number of alloc reqs aborted due to object death + AllocationsAbortedDueToObjectDeath uint64 + // Number of retrieval (read) requests seen + RetrievalsReadRequests uint64 + // Number of successful retr reqs + RetrievalsOk uint64 + // Number of retr reqs that waited on lookup completion + RetrievalsWaitingLookupCompletion uint64 + // Number of retr reqs returned -ENODATA + RetrievalsReturnedEnodata uint64 + // Number of retr reqs rejected -ENOBUFS + RetrievalsRejectedDueToEnobufs uint64 + // Number of retr reqs aborted -ERESTARTSYS + RetrievalsAbortedDueToErestartsys uint64 + // Number of retr reqs failed -ENOMEM + RetrievalsFailedDueToEnomem uint64 + // Number of retr reqs submitted + RetrievalsRequests uint64 + // Number of retr reqs waited for CPU time + RetrievalsWaitingCPU uint64 + // Number of retr reqs aborted due to object death + RetrievalsAbortedDueToObjectDeath uint64 + // Number of storage (write) requests seen + StoreWriteRequests uint64 + // Number of successful store reqs + StoreSuccessfulRequests uint64 + // Number of store reqs on a page already pending storage + StoreRequestsOnPendingStorage uint64 + // Number of store reqs rejected -ENOBUFS + StoreRequestsRejectedDueToEnobufs uint64 + // Number of store reqs failed -ENOMEM + StoreRequestsFailedDueToEnomem uint64 + // Number of store reqs submitted + StoreRequestsSubmitted uint64 + // Number of store reqs granted CPU time + StoreRequestsRunning uint64 + // Number of pages given store req processing time + StorePagesWithRequestsProcessing uint64 + // Number of store reqs deleted from tracking tree + StoreRequestsDeleted uint64 + // Number of store reqs over store limit + StoreRequestsOverStoreLimit uint64 + // Number of release reqs against pages with no pending store + ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 + // Number of release reqs against pages 
stored by time lock granted + ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 + // Number of release reqs ignored due to in-progress store + ReleaseRequestsIgnoredDueToInProgressStore uint64 + // Number of page stores cancelled due to release req + PageStoresCancelledByReleaseRequests uint64 + VmscanWaiting uint64 + // Number of times async ops added to pending queues + OpsPending uint64 + // Number of times async ops given CPU time + OpsRunning uint64 + // Number of times async ops queued for processing + OpsEnqueued uint64 + // Number of async ops cancelled + OpsCancelled uint64 + // Number of async ops rejected due to object lookup/create failure + OpsRejected uint64 + // Number of async ops initialised + OpsInitialised uint64 + // Number of async ops queued for deferred release + OpsDeferred uint64 + // Number of async ops released (should equal ini=N when idle) + OpsReleased uint64 + // Number of deferred-release async ops garbage collected + OpsGarbageCollected uint64 + // Number of in-progress alloc_object() cache ops + CacheopAllocationsinProgress uint64 + // Number of in-progress lookup_object() cache ops + CacheopLookupObjectInProgress uint64 + // Number of in-progress lookup_complete() cache ops + CacheopLookupCompleteInPorgress uint64 + // Number of in-progress grab_object() cache ops + CacheopGrabObjectInProgress uint64 + CacheopInvalidations uint64 + // Number of in-progress update_object() cache ops + CacheopUpdateObjectInProgress uint64 + // Number of in-progress drop_object() cache ops + CacheopDropObjectInProgress uint64 + // Number of in-progress put_object() cache ops + CacheopPutObjectInProgress uint64 + // Number of in-progress attr_changed() cache ops + CacheopAttributeChangeInProgress uint64 + // Number of in-progress sync_cache() cache ops + CacheopSyncCacheInProgress uint64 + // Number of in-progress read_or_alloc_page() cache ops + CacheopReadOrAllocPageInProgress uint64 + // Number of in-progress read_or_alloc_pages() cache ops + CacheopReadOrAllocPagesInProgress uint64 + // Number of in-progress allocate_page() cache ops + CacheopAllocatePageInProgress uint64 + // Number of in-progress allocate_pages() cache ops + CacheopAllocatePagesInProgress uint64 + // Number of in-progress write_page() cache ops + CacheopWritePagesInProgress uint64 + // Number of in-progress uncache_page() cache ops + CacheopUncachePagesInProgress uint64 + // Number of in-progress dissociate_pages() cache ops + CacheopDissociatePagesInProgress uint64 + // Number of object lookups/creations rejected due to lack of space + CacheevLookupsAndCreationsRejectedLackSpace uint64 + // Number of stale objects deleted + CacheevStaleObjectsDeleted uint64 + // Number of objects retired when relinquished + CacheevRetiredWhenReliquished uint64 + // Number of objects culled + CacheevObjectsCulled uint64 +} + +// Fscacheinfo returns information about current fscache statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt +func (fs FS) Fscacheinfo() (Fscacheinfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) + if err != nil { + return Fscacheinfo{}, err + } + + m, err := parseFscacheinfo(bytes.NewReader(b)) + if err != nil { + return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err) + } + + return *m, nil +} + +func setFSCacheFields(fields []string, setFields ...*uint64) error { + var err error + if len(fields) < len(setFields) { + return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + } + + for i := range setFields { + *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) + if err != nil { + return err + } + } + return nil +} + +func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { + var m Fscacheinfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + } + + switch fields[0] { + case "Cookies:": + err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, + &m.SpecialCookiesAllocated) + if err != nil { + return &m, err + } + case "Objects:": + err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, + &m.ObjectsAvailable, &m.ObjectsDead) + if err != nil { + return &m, err + } + case "ChkAux": + err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, + &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) + if err != nil { + return &m, err + } + case "Pages": + err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) + if err != nil { + return &m, err + } + case "Acquire:": + err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, + &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, + &m.AcquireRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + case "Lookups:": + err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, + &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) + if err != nil { + return &m, err + } + case "Invals": + err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) + if err != nil { + return &m, err + } + case "Updates:": + err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, + &m.UpdateRequestsRunning) + if err != nil { + return &m, err + } + case "Relinqs:": + err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, + &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) + if err != nil { + return &m, err + } + case "AttrChg:": + err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, + &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) + if err != nil { + return &m, err + } + case "Allocs": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, + &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], 
&m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, + &m.AllocationsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Retrvls:": + if strings.Split(fields[1], "=")[0] == "n" { + err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, + &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, + &m.RetrievalsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Stores": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, + &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, + &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) + if err != nil { + return &m, err + } + } + case "VmScan": + err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, + &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, + &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) + if err != nil { + return &m, err + } + case "Ops": + if strings.Split(fields[2], "=")[0] == "pend" { + err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) + if err != nil { + return &m, err + } + } + case "CacheOp:": + if strings.Split(fields[1], "=")[0] == "alo" { + err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, + &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) + if err != nil { + return &m, err + } + } else if strings.Split(fields[1], "=")[0] == "inv" { + err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, + &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, + &m.CacheopSyncCacheInProgress) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, + &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, + &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) + if err != nil { + return &m, err + } + } + case "CacheEv:": + err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, + &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) + if err != nil { + return &m, err + } + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod new file mode 100644 index 0000000000..ded48253cd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -0,0 +1,9 @@ +module github.com/prometheus/procfs + +go 1.12 + +require ( + github.com/google/go-cmp v0.3.1 + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e + golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e +) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum new file mode 100644 index 0000000000..54b5f33033 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.sum @@ -0,0 +1,6 @@ +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 0000000000..565e89e42c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. + DefaultSysMountPoint = "/sys" + + // DefaultConfigfsMountPoint is the common mount point of the configfs + DefaultConfigfsMountPoint = "/sys/kernel/config" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 0000000000..22cb07a6bb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io/ioutil" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ParsePInt64s parses a slice of strings into a slice of int64 pointers. +func ParsePInt64s(ss []string) ([]*int64, error) { + us := make([]*int64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// ReadIntFromFile reads a file and attempts to parse a int64 from it. +func ReadIntFromFile(path string) (int64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} + +// ParseBool parses a string into a boolean pointer. +func ParseBool(b string) *bool { + var truth bool + switch b { + case "enabled": + truth = true + case "disabled": + truth = false + default: + return nil + } + return &truth +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go new file mode 100644 index 0000000000..8051161b2a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io" + "io/ioutil" + "os" +) + +// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. +// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). +// Reads a max file size of 512kB. For files larger than this, a scanner +// should be used. 
+func ReadFileNoStat(filename string) ([]byte, error) { + const maxBufferSize = 1024 * 512 + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + reader := io.LimitReader(f, maxBufferSize) + return ioutil.ReadAll(reader) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go new file mode 100644 index 0000000000..c07de0b6c9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,!appengine + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go new file mode 100644 index 0000000000..bd55b45377 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,appengine !linux + +package util + +import ( + "fmt" +) + +// SysReadFile is here implemented as a noop for builds that do not support +// the read syscall. For example Windows, or Linux on Google App Engine. 
+func SysReadFile(file string) (string, error) { + return "", fmt.Errorf("not supported on this platform") +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go new file mode 100644 index 0000000000..fe2355d3c6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "strconv" +) + +// TODO(mdlayher): util packages are an anti-pattern and this should be moved +// somewhere else that is more focused in the future. + +// A ValueParser enables parsing a single string into a variety of data types +// in a concise and safe way. The Err method must be invoked after invoking +// any other methods to ensure a value was successfully parsed. +type ValueParser struct { + v string + err error +} + +// NewValueParser creates a ValueParser using the input string. +func NewValueParser(v string) *ValueParser { + return &ValueParser{v: v} +} + +// Int interprets the underlying value as an int and returns that value. +func (vp *ValueParser) Int() int { return int(vp.int64()) } + +// PInt64 interprets the underlying value as an int64 and returns a pointer to +// that value. +func (vp *ValueParser) PInt64() *int64 { + if vp.err != nil { + return nil + } + + v := vp.int64() + return &v +} + +// int64 interprets the underlying value as an int64 and returns that value. +// TODO: export if/when necessary. +func (vp *ValueParser) int64() int64 { + if vp.err != nil { + return 0 + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseInt(vp.v, base, 64) + if err != nil { + vp.err = err + return 0 + } + + return v +} + +// PUInt64 interprets the underlying value as an uint64 and returns a pointer to +// that value. +func (vp *ValueParser) PUInt64() *uint64 { + if vp.err != nil { + return nil + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseUint(vp.v, base, 64) + if err != nil { + vp.err = err + return nil + } + + return &v +} + +// Err returns the last error, if any, encountered by the ValueParser. +func (vp *ValueParser) Err() error { + return vp.err +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go new file mode 100644 index 0000000000..89e447746c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -0,0 +1,241 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + + return parseIPVSStats(bytes.NewReader(data)) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(r io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(r) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go new file mode 100644 index 0000000000..da3a941d60 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -0,0 +1,62 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "os" + + "github.com/prometheus/procfs/internal/util" +) + +// KernelRandom contains information about to the kernel's random number generator. 
+type KernelRandom struct { + // EntropyAvaliable gives the available entropy, in bits. + EntropyAvaliable *uint64 + // PoolSize gives the size of the entropy pool, in bits. + PoolSize *uint64 + // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. + URandomMinReseedSeconds *uint64 + // WriteWakeupThreshold the number of bits of entropy below which we wake up processes + // that do a select(2) or poll(2) for write access to /dev/random. + WriteWakeupThreshold *uint64 + // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep + // waiting for entropy from /dev/random. + ReadWakeupThreshold *uint64 +} + +// KernelRandom returns values from /proc/sys/kernel/random. +func (fs FS) KernelRandom() (KernelRandom, error) { + random := KernelRandom{} + + for file, p := range map[string]**uint64{ + "entropy_avail": &random.EntropyAvaliable, + "poolsize": &random.PoolSize, + "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, + "write_wakeup_threshold": &random.WriteWakeupThreshold, + "read_wakeup_threshold": &random.ReadWakeupThreshold, + } { + val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) + if os.IsNotExist(err) { + continue + } + if err != nil { + return random, err + } + *p = &val + } + + return random, nil +} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 0000000000..00bbe14417 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. +func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. 
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("could not parse load '%s': %s", load, err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 0000000000..98e37aa8ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,197 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device requires. + DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Spare disks in the device. + DisksSpare int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + if err != nil { + return nil, err + } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} + +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. +func parseMDStat(mdStatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { + continue + } + + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + if len(lines) <= i+3 { + return nil, fmt.Errorf( + "error parsing %s: too few lines for md device", + mdName, + ) + } + + // Failed disks have the suffix (F) & Spare disks have the suffix (S). 
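Editor's note: LoadAvg usage is a one-liner once an FS handle exists; a sketch, again assuming NewDefaultFS.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	// Only the first three fields of /proc/loadavg are parsed.
	avg, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load averages: %.2f (1m) %.2f (5m) %.2f (15m)\n",
		avg.Load1, avg.Load5, avg.Load15)
}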
+ fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + + if err != nil { + return nil, fmt.Errorf("error parsing md device lines: %s", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + syncedBlocks := size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. + if strings.Contains(lines[syncLineIdx], "PENDING") || + strings.Contains(lines[syncLineIdx], "DELAYED") { + syncedBlocks = 0 + } else { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err) + } + } + } + + mdStats = append(mdStats, MDStat{ + Name: mdName, + ActivityState: state, + DisksActive: active, + DisksFailed: fail, + DisksSpare: spare, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStats, nil +} + +func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { + + sizeStr := strings.Fields(statusLine)[0] + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. + total = int64(strings.Count(deviceLine, "[")) + return total, total, size, nil + } + + if strings.Contains(deviceLine, "inactive") { + return 0, 0, size, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLine) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + } + + return active, total, size, nil +} + +func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { + matches := recoveryLineRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go new file mode 100644 index 0000000000..50dab4bcd5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -0,0 +1,277 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
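Editor's note: a sketch of iterating the MDStat slice parsed from /proc/mdstat (NewDefaultFS assumed as before).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	// One MDStat entry per md device line.
	devices, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range devices {
		fmt.Printf("%s: %s, %d/%d disks, %d/%d blocks synced\n",
			md.Name, md.ActivityState,
			md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}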
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Meminfo represents memory statistics. +type Meminfo struct { + // Total usable ram (i.e. physical ram minus a few reserved + // bits and the kernel binary code) + MemTotal uint64 + // The sum of LowFree+HighFree + MemFree uint64 + // An estimate of how much memory is available for starting + // new applications, without swapping. Calculated from + // MemFree, SReclaimable, the size of the file LRU lists, and + // the low watermarks in each zone. The estimate takes into + // account that the system needs some page cache to function + // well, and that not all reclaimable slab will be + // reclaimable, due to items being in use. The impact of those + // factors will vary from system to system. + MemAvailable uint64 + // Relatively temporary storage for raw disk blocks shouldn't + // get tremendously large (20MB or so) + Buffers uint64 + Cached uint64 + // Memory that once was swapped out, is swapped back in but + // still also is in the swapfile (if memory is needed it + // doesn't need to be swapped out AGAIN because it is already + // in the swapfile. This saves I/O) + SwapCached uint64 + // Memory that has been used more recently and usually not + // reclaimed unless absolutely necessary. + Active uint64 + // Memory which has been less recently used. It is more + // eligible to be reclaimed for other purposes + Inactive uint64 + ActiveAnon uint64 + InactiveAnon uint64 + ActiveFile uint64 + InactiveFile uint64 + Unevictable uint64 + Mlocked uint64 + // total amount of swap space available + SwapTotal uint64 + // Memory which has been evicted from RAM, and is temporarily + // on the disk + SwapFree uint64 + // Memory which is waiting to get written back to the disk + Dirty uint64 + // Memory which is actively being written back to the disk + Writeback uint64 + // Non-file backed pages mapped into userspace page tables + AnonPages uint64 + // files which have been mapped, such as libraries + Mapped uint64 + Shmem uint64 + // in-kernel data structures cache + Slab uint64 + // Part of Slab, that might be reclaimed, such as caches + SReclaimable uint64 + // Part of Slab, that cannot be reclaimed on memory pressure + SUnreclaim uint64 + KernelStack uint64 + // amount of memory dedicated to the lowest level of page + // tables. + PageTables uint64 + // NFS pages sent to the server, but not yet committed to + // stable storage + NFSUnstable uint64 + // Memory used for block device "bounce buffers" + Bounce uint64 + // Memory used by FUSE for temporary writeback buffers + WritebackTmp uint64 + // Based on the overcommit ratio ('vm.overcommit_ratio'), + // this is the total amount of memory currently available to + // be allocated on the system. This limit is only adhered to + // if strict overcommit accounting is enabled (mode 2 in + // 'vm.overcommit_memory'). 
+ // The CommitLimit is calculated with the following formula: + // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * + // overcommit_ratio / 100 + [total swap pages] + // For example, on a system with 1G of physical RAM and 7G + // of swap with a `vm.overcommit_ratio` of 30 it would + // yield a CommitLimit of 7.3G. + // For more details, see the memory overcommit documentation + // in vm/overcommit-accounting. + CommitLimit uint64 + // The amount of memory presently allocated on the system. + // The committed memory is a sum of all of the memory which + // has been allocated by processes, even if it has not been + // "used" by them as of yet. A process which malloc()'s 1G + // of memory, but only touches 300M of it will show up as + // using 1G. This 1G is memory which has been "committed" to + // by the VM and can be used at any time by the allocating + // application. With strict overcommit enabled on the system + // (mode 2 in 'vm.overcommit_memory'),allocations which would + // exceed the CommitLimit (detailed above) will not be permitted. + // This is useful if one needs to guarantee that processes will + // not fail due to lack of memory once that memory has been + // successfully allocated. + CommittedAS uint64 + // total size of vmalloc memory area + VmallocTotal uint64 + // amount of vmalloc area which is used + VmallocUsed uint64 + // largest contiguous block of vmalloc area which is free + VmallocChunk uint64 + HardwareCorrupted uint64 + AnonHugePages uint64 + ShmemHugePages uint64 + ShmemPmdMapped uint64 + CmaTotal uint64 + CmaFree uint64 + HugePagesTotal uint64 + HugePagesFree uint64 + HugePagesRsvd uint64 + HugePagesSurp uint64 + Hugepagesize uint64 + DirectMap4k uint64 + DirectMap2M uint64 + DirectMap1G uint64 +} + +// Meminfo returns an information about current kernel/system memory statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Meminfo() (Meminfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) + if err != nil { + return Meminfo{}, err + } + + m, err := parseMemInfo(bytes.NewReader(b)) + if err != nil { + return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err) + } + + return *m, nil +} + +func parseMemInfo(r io.Reader) (*Meminfo, error) { + var m Meminfo + s := bufio.NewScanner(r) + for s.Scan() { + // Each line has at least a name and value; we ignore the unit. 
+ fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + } + + v, err := strconv.ParseUint(fields[1], 0, 64) + if err != nil { + return nil, err + } + + switch fields[0] { + case "MemTotal:": + m.MemTotal = v + case "MemFree:": + m.MemFree = v + case "MemAvailable:": + m.MemAvailable = v + case "Buffers:": + m.Buffers = v + case "Cached:": + m.Cached = v + case "SwapCached:": + m.SwapCached = v + case "Active:": + m.Active = v + case "Inactive:": + m.Inactive = v + case "Active(anon):": + m.ActiveAnon = v + case "Inactive(anon):": + m.InactiveAnon = v + case "Active(file):": + m.ActiveFile = v + case "Inactive(file):": + m.InactiveFile = v + case "Unevictable:": + m.Unevictable = v + case "Mlocked:": + m.Mlocked = v + case "SwapTotal:": + m.SwapTotal = v + case "SwapFree:": + m.SwapFree = v + case "Dirty:": + m.Dirty = v + case "Writeback:": + m.Writeback = v + case "AnonPages:": + m.AnonPages = v + case "Mapped:": + m.Mapped = v + case "Shmem:": + m.Shmem = v + case "Slab:": + m.Slab = v + case "SReclaimable:": + m.SReclaimable = v + case "SUnreclaim:": + m.SUnreclaim = v + case "KernelStack:": + m.KernelStack = v + case "PageTables:": + m.PageTables = v + case "NFS_Unstable:": + m.NFSUnstable = v + case "Bounce:": + m.Bounce = v + case "WritebackTmp:": + m.WritebackTmp = v + case "CommitLimit:": + m.CommitLimit = v + case "Committed_AS:": + m.CommittedAS = v + case "VmallocTotal:": + m.VmallocTotal = v + case "VmallocUsed:": + m.VmallocUsed = v + case "VmallocChunk:": + m.VmallocChunk = v + case "HardwareCorrupted:": + m.HardwareCorrupted = v + case "AnonHugePages:": + m.AnonHugePages = v + case "ShmemHugePages:": + m.ShmemHugePages = v + case "ShmemPmdMapped:": + m.ShmemPmdMapped = v + case "CmaTotal:": + m.CmaTotal = v + case "CmaFree:": + m.CmaFree = v + case "HugePages_Total:": + m.HugePagesTotal = v + case "HugePages_Free:": + m.HugePagesFree = v + case "HugePages_Rsvd:": + m.HugePagesRsvd = v + case "HugePages_Surp:": + m.HugePagesSurp = v + case "Hugepagesize:": + m.Hugepagesize = v + case "DirectMap4k:": + m.DirectMap4k = v + case "DirectMap2M:": + m.DirectMap2M = v + case "DirectMap1G:": + m.DirectMap1G = v + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go new file mode 100644 index 0000000000..59f4d50558 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -0,0 +1,180 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A MountInfo is a type that describes the details, options +// for each mount, parsed from /proc/self/mountinfo. +// The fields described in each entry of /proc/self/mountinfo +// is described in the following man page. 
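Editor's note: a Meminfo usage sketch. The parser drops the unit column, so the values carry the kernel's native unit, kB for most fields (NewDefaultFS assumed).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	// Raw kernel numbers; the "kB" suffix in /proc/meminfo is discarded.
	fmt.Printf("MemTotal=%d MemFree=%d MemAvailable=%d\n",
		mi.MemTotal, mi.MemFree, mi.MemAvailable)
}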
+// http://man7.org/linux/man-pages/man5/proc.5.html +type MountInfo struct { + // Unique ID for the mount + MountID int + // The ID of the parent mount + ParentID int + // The value of `st_dev` for the files on this FS + MajorMinorVer string + // The pathname of the directory in the FS that forms + // the root for this mount + Root string + // The pathname of the mount point relative to the root + MountPoint string + // Mount options + Options map[string]string + // Zero or more optional fields + OptionalFields map[string]string + // The Filesystem type + FSType string + // FS specific information or "none" + Source string + // Superblock options + SuperOptions map[string]string +} + +// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. +func parseMountInfo(info []byte) ([]*MountInfo, error) { + mounts := []*MountInfo{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseMountInfoString(mountString) + if err != nil { + return nil, err + } + mounts = append(mounts, parsedMounts) + } + + err := scanner.Err() + return mounts, err +} + +// Parses a mountinfo file line, and converts it to a MountInfo struct. +// An important check here is to see if the hyphen separator, as if it does not exist, +// it means that the line is malformed. +func parseMountInfoString(mountString string) (*MountInfo, error) { + var err error + + mountInfo := strings.Split(mountString, " ") + mountInfoLength := len(mountInfo) + if mountInfoLength < 10 { + return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + } + + if mountInfo[mountInfoLength-4] != "-" { + return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + } + + mount := &MountInfo{ + MajorMinorVer: mountInfo[2], + Root: mountInfo[3], + MountPoint: mountInfo[4], + Options: mountOptionsParser(mountInfo[5]), + OptionalFields: nil, + FSType: mountInfo[mountInfoLength-3], + Source: mountInfo[mountInfoLength-2], + SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), + } + + mount.MountID, err = strconv.Atoi(mountInfo[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse mount ID") + } + mount.ParentID, err = strconv.Atoi(mountInfo[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse parent ID") + } + // Has optional fields, which is a space separated list of values. + // Example: shared:2 master:7 + if mountInfo[6] != "" { + mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) + if err != nil { + return nil, err + } + } + return mount, nil +} + +// mountOptionsIsValidField checks a string against a valid list of optional fields keys. +func mountOptionsIsValidField(s string) bool { + switch s { + case + "shared", + "master", + "propagate_from", + "unbindable": + return true + } + return false +} + +// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. +func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { + optionalFields := make(map[string]string) + for _, field := range o { + optionSplit := strings.SplitN(field, ":", 2) + value := "" + if len(optionSplit) == 2 { + value = optionSplit[1] + } + if mountOptionsIsValidField(optionSplit[0]) { + optionalFields[optionSplit[0]] = value + } + } + return optionalFields, nil +} + +// mountOptionsParser parses the mount options, superblock options. 
+func mountOptionsParser(mountOptions string) map[string]string { + opts := make(map[string]string) + options := strings.Split(mountOptions, ",") + for _, opt := range options { + splitOption := strings.Split(opt, "=") + if len(splitOption) < 2 { + key := splitOption[0] + opts[key] = "" + } else { + key, value := splitOption[0], splitOption[1] + opts[key] = value + } + } + return opts +} + +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +func GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat("/proc/self/mountinfo") + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. +func GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 0000000000..861ced9da0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,629 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The mount options of the NFS mount. + Opts map[string]string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. 
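Editor's note: GetMounts needs no FS handle because it reads /proc/self/mountinfo directly; a brief sketch.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// One *MountInfo per line of /proc/self/mountinfo.
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		fmt.Printf("%s on %s type %s\n", m.Source, m.MountPoint, m.FSType)
	}
}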
+ Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. + SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. + PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { + // The name of the operation. + Operation string + // Number of requests performed for this operation. 
+ Requests uint64 + // Number of times an actual RPC request has been transmitted for this operation. + Transmissions uint64 + // Number of times a request has had a major timeout. + MajorTimeouts uint64 + // Number of bytes sent for this operation, including RPC headers and payload. + BytesSent uint64 + // Number of bytes received for this operation, including RPC headers and payload. + BytesReceived uint64 + // Duration all requests spent queued for transmission before they were sent. + CumulativeQueueMilliseconds uint64 + // Duration it took to get a reply back after the request was transmitted. + CumulativeTotalResponseMilliseconds uint64 + // Duration from when a request was enqueued to when it was completely handled. + CumulativeTotalRequestMilliseconds uint64 + // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. + Errors uint64 +} + +// A NFSTransportStats contains statistics for the NFS mount RPC requests and +// responses. +type NFSTransportStats struct { + // The transport protocol used for the NFS mount. + Protocol string + // The local port used for the NFS mount. + Port uint64 + // Number of times the client has had to establish a connection from scratch + // to the NFS server. + Bind uint64 + // Number of times the client has made a TCP connection to the NFS server. + Connect uint64 + // Duration (in jiffies, a kernel internal unit of time) the NFS mount has + // spent waiting for connections to the server to be established. + ConnectIdleTime uint64 + // Duration since the NFS mount last saw any RPC traffic. + IdleTimeSeconds uint64 + // Number of RPC requests for this mount sent to the NFS server. + Sends uint64 + // Number of RPC responses for this mount received from the NFS server. + Receives uint64 + // Number of times the NFS server sent a response with a transaction ID + // unknown to this client. + BadTransactionIDs uint64 + // A running counter, incremented on each request as the current difference + // ebetween sends and receives. + CumulativeActiveRequests uint64 + // A running counter, incremented on each request by the current backlog + // queue size. + CumulativeBacklog uint64 + + // Stats below only available with stat version 1.1. + + // Maximum number of simultaneously active RPC requests ever used. + MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. + CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. +func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? 
+ if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. +func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldOpts = "opts:" + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldOpts: + if stats.Opts == nil { + stats.Opts = map[string]string{} + } + for _, opt := range strings.Split(ss[1], ",") { + split := strings.Split(opt, "=") + if len(split) == 2 { + stats.Opts[split[0]] = split[1] + } else { + stats.Opts[opt] = "" + } + } + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[1:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. 
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. +func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Minimum number of expected fields in each per-operation statistics set + minFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) < minFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, minFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + opStats := NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], + } + + if len(ns) > 8 { + opStats.Errors = ns[8] + } + + ops = append(ops, opStats) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. 
It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + + switch statVersion { + case statVersion10: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. Since the stat length is bigger for TCP stats, we use + // the TCP length here. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11TCPLen) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + + return &NFSTransportStats{ + Protocol: protocol, + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTimeSeconds: ns[4], + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go new file mode 100644 index 0000000000..b637be9845 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -0,0 +1,153 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
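Editor's note: a hedged sketch of consuming the NFS mount statistics above. It assumes the per-process entry points (FS.Self and Proc.MountStats, defined in proc.go vendored elsewhere in this patch) and type-asserts the Stats interface to *MountStatsNFS; non-NFS mounts leave Stats nil.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	self, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}

	mounts, err := self.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Only NFSv3/v4 mounts carry detailed statistics.
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		fmt.Printf("%s on %s: age=%s read=%d B written=%d B\n",
			m.Device, m.Mount, nfs.Age, nfs.Bytes.ReadTotal, nfs.Bytes.WriteTotal)
	}
}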
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A ConntrackStatEntry represents one line from net/stat/nf_conntrack +// and contains netfilter conntrack statistics at one CPU core +type ConntrackStatEntry struct { + Entries uint64 + Found uint64 + Invalid uint64 + Ignore uint64 + Insert uint64 + InsertFailed uint64 + Drop uint64 + EarlyDrop uint64 + SearchRestart uint64 +} + +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { + return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) +} + +// Parses a slice of ConntrackStatEntries from the given filepath +func readConntrackStat(path string) ([]ConntrackStatEntry, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(path) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseConntrackStat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err) + } + + return stat, nil +} + +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { + var entries []ConntrackStatEntry + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + conntrackEntry, err := parseConntrackStatEntry(fields) + if err != nil { + return nil, err + } + entries = append(entries, *conntrackEntry) + } + + return entries, nil +} + +// Parses a ConntrackStatEntry from given array of fields +func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { + if len(fields) != 17 { + return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") + } + entry := &ConntrackStatEntry{} + + entries, err := parseConntrackStatField(fields[0]) + if err != nil { + return nil, err + } + entry.Entries = entries + + found, err := parseConntrackStatField(fields[2]) + if err != nil { + return nil, err + } + entry.Found = found + + invalid, err := parseConntrackStatField(fields[4]) + if err != nil { + return nil, err + } + entry.Invalid = invalid + + ignore, err := parseConntrackStatField(fields[5]) + if err != nil { + return nil, err + } + entry.Ignore = ignore + + insert, err := parseConntrackStatField(fields[8]) + if err != nil { + return nil, err + } + entry.Insert = insert + + insertFailed, err := parseConntrackStatField(fields[9]) + if err != nil { + return nil, err + } + entry.InsertFailed = insertFailed + + drop, err := parseConntrackStatField(fields[10]) + if err != nil { + return nil, err + } + entry.Drop = drop + + earlyDrop, err := parseConntrackStatField(fields[11]) + if err != nil { + return nil, err + } + entry.EarlyDrop = earlyDrop + + searchRestart, err := parseConntrackStatField(fields[16]) + if err != nil { + return nil, err + } + entry.SearchRestart = searchRestart + + return entry, nil +} + +// Parses a uint64 from given hex in string +func parseConntrackStatField(field string) (uint64, error) { + val, err := strconv.ParseUint(field, 16, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err) + } + return val, err +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 
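Editor's note: conntrack statistics come back as one entry per CPU core; a sketch that aggregates the drop counter (NewDefaultFS assumed).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	// One ConntrackStatEntry per CPU core from /proc/net/stat/nf_conntrack.
	perCPU, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	var drops uint64
	for _, e := range perCPU {
		drops += e.Drop
	}
	fmt.Printf("%d cores, %d dropped conntrack entries\n", len(perCPU), drops)
}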
index 0000000000..47a710befb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,205 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) +} + +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + netDev := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := netDev.parseLine(s.Text()) + if err != nil { + return netDev, err + } + + netDev[line.Name] = *line + } + + return netDev, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { + parts := strings.SplitN(rawLine, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(parts[1])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(parts[0]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + } + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err + } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma separated list of interface names. +func (netDev NetDev) Total() NetDevLine { + total := NetDevLine{} + + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { + names = append(names, ifc.Name) + total.RxBytes += ifc.RxBytes + total.RxPackets += ifc.RxPackets + total.RxErrors += ifc.RxErrors + total.RxDropped += ifc.RxDropped + total.RxFIFO += ifc.RxFIFO + total.RxFrame += ifc.RxFrame + total.RxCompressed += ifc.RxCompressed + total.RxMulticast += ifc.RxMulticast + total.TxBytes += ifc.TxBytes + total.TxPackets += ifc.TxPackets + total.TxErrors += ifc.TxErrors + total.TxDropped += ifc.TxDropped + total.TxFIFO += ifc.TxFIFO + total.TxCollisions += ifc.TxCollisions + total.TxCarrier += ifc.TxCarrier + total.TxCompressed += ifc.TxCompressed + } + sort.Strings(names) + total.Name = strings.Join(names, ", ") + + return total +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 0000000000..f91ef55237 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,163 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
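Editor's note: a sketch of the NetDev map and its Total helper (NewDefaultFS assumed).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	// NetDev is keyed by interface name; Total() folds every interface into
	// one line whose Name is the sorted, comma separated interface list.
	nd, err := fs.NetDev()
	if err != nil {
		log.Fatal(err)
	}
	for name, line := range nd {
		fmt.Printf("%s: rx=%d B tx=%d B\n", name, line.RxBytes, line.TxBytes)
	}
	total := nd.Total()
	fmt.Printf("total (%s): rx=%d B tx=%d B\n", total.Name, total.RxBytes, total.TxBytes)
}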
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. +type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. +// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. +func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + } + + // The remaining fields are key/value pairs. + kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, errors.New("odd number of fields in key/value pairs") + } + + // Iterate two values at a time to gather key/value pairs. 
+ out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. +func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. + v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 0000000000..db5debdf4a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// For the proc file format details, +// See: +// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 +// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. + +// SoftnetStat contains a single row of data from /proc/net/softnet_stat +type SoftnetStat struct { + // Number of processed packets + Processed uint32 + // Number of dropped packets + Dropped uint32 + // Number of times processing packets ran out of quota + TimeSqueezed uint32 +} + +var softNetProcFile = "net/softnet_stat" + +// NetSoftnetStat reads data from /proc/net/softnet_stat. +func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err) + } + + return entries, nil +} + +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const minColumns = 9 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + for s.Scan() { + columns := strings.Fields(s.Text()) + width := len(columns) + + if width < minColumns { + return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + } + + // We only parse the first three columns at the moment. 
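Editor's note: a sockstat usage sketch. The IPv6 variant deliberately returns the raw open error, so a kernel without IPv6 is detectable with os.IsNotExist (NewDefaultFS assumed).

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	st4, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}
	// Only the IPv4 file carries the top-level "sockets: used" value.
	if st4.Used != nil {
		fmt.Println("IPv4 sockets in use:", *st4.Used)
	}
	for _, p := range st4.Protocols {
		fmt.Printf("%s inuse=%d\n", p.Protocol, p.InUse)
	}

	// Unwrapped error: os.IsNotExist works when IPv6 is disabled.
	if _, err := fs.NetSockstat6(); os.IsNotExist(err) {
		fmt.Println("IPv6 is disabled on this kernel")
	}
}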
+ us, err := parseHexUint32s(columns[0:3]) + if err != nil { + return nil, err + } + + stats = append(stats, SoftnetStat{ + Processed: us[0], + Dropped: us[1], + TimeSqueezed: us[2], + }) + } + + return stats, nil +} + +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 0000000000..d017e3f18d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,229 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netUDPLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netUDPLine represents the fields parsed from a single line + // in /proc/net/udp{,6}. Fields which are not used by UDP are skipped. + // For the proc file format details, see https://linux.die.net/man/5/proc. + netUDPLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + } +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. +func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. 
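+//
+// A rough usage sketch, assuming a FS value fs obtained from
+// NewFS(DefaultMountPoint):
+//
+//	summary, err := fs.NetUDPSummary()
+//	if err == nil {
+//		fmt.Println(summary.UsedSockets, summary.TxQueueLength, summary.RxQueueLength)
+//	}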
+func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. +func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. +func newNetUDP(file string) (NetUDP, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDP := NetUDP{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDP = append(netUDP, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDP, nil +} + +// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file. +func newNetUDPSummary(file string) (*NetUDPSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDPSummary := &NetUDPSummary{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDPSummary.TxQueueLength += line.TxQueue + netUDPSummary.RxQueueLength += line.RxQueue + netUDPSummary.UsedSockets++ + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDPSummary, nil +} + +// parseNetUDPLine parses a single line, represented by a list of fields. +func parseNetUDPLine(fields []string) (*netUDPLine, error) { + line := &netUDPLine{} + if len(fields) < 8 { + return nil, fmt.Errorf( + "cannot parse net udp socket line as it has less then 8 columns: %s", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf( + "cannot parse sl field in udp socket line: %s", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf( + "cannot parse local_address field in udp socket line: %s", fields[1]) + } + if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address value in udp socket line: %s", err) + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address port value in udp socket line: %s", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf( + "cannot parse rem_address field in udp socket line: %s", fields[1]) + } + if line.RemAddr, err = hex.DecodeString(r[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address value in udp socket line: %s", err) + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address port value in udp socket line: %s", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse st value in udp socket line: %s", err) + } + + // tx_queue and rx_queue + q := 
strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in udp socket line as it has a missing colon: %s", + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse uid value in udp socket line: %s", err) + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 0000000000..c55b4b18e4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,257 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? +const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 + +// NetUNIXFlags is the type of the flags field. +type NetUNIXFlags uint64 + +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 + +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState + Inode uint64 + Path string +} + +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine +} + +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) +} + +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + return parseNetUNIX(f) +} + +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. 
+ s := bufio.NewScanner(r) + s.Scan() + + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. This code works for both cases. + hasInode := strings.Contains(s.Text(), "Inode") + + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 + if hasInode { + minFields++ + } + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err) + } + + nu.Rows = append(nu.Rows, item) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err) + } + + return &nu, nil +} + +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { + fields := strings.Fields(line) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + } + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err) + } + + flags, err := u.parseFlags(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err) + } + + typ, err := u.parseType(fields[4]) + if err != nil { + return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err) + } + + state, err := u.parseState(fields[5]) + if err != nil { + return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err) + } + + var inode uint64 + if hasInode { + inode, err = u.parseInode(fields[6]) + if err != nil { + return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err) + } + } + + n := &NetUNIXLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: state, + Inode: inode, + } + + // Path field is optional. + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. 
+ pathIdx := 7 + if !hasInode { + pathIdx-- + } + + n.Path = fields[pathIdx] + } + + return n, nil +} + +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) +} + +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) + if err != nil { + return 0, err + } + + return NetUNIXType(typ), nil +} + +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return 0, err + } + + return NetUNIXFlags(flags), nil +} + +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) + if err != nil { + return 0, err + } + + return NetUNIXState(st), nil +} + +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func (t NetUNIXType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUNIXFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUNIXState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 0000000000..9f97b6e523 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,319 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs fs.FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. 
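+//
+// For example, a caller outside this package could enumerate PIDs roughly
+// as follows:
+//
+//	procs, err := procfs.AllProcs()
+//	if err == nil {
+//		for _, p := range procs {
+//			fmt.Println(p.PID)
+//		}
+//	}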
+func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.proc.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// NewProc returns a process for the given pid. +// +// Deprecated: use fs.Proc() instead +func (fs FS) NewProc(pid int) (Proc, error) { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. +func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs.proc}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.proc.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs.proc}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + data, err := util.ReadFileNoStat(p.path("cmdline")) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Wchan returns the wchan (wait channel) of a process. +func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + data, err := util.ReadFileNoStat(p.path("comm")) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + +// FileDescriptors returns the currently open file descriptors of a process. 
+func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +// MountInfo retrieves mount information for mount points in a +// process's namespace. +// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. +func (p Proc) MountInfo() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(p.path("mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. +func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := ioutil.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go new file mode 100644 index 0000000000..4abd46451c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies +// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in +// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of +// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID +// in this hierarchy +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type Cgroup struct { + // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one + // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number + HierarchyID int + // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For + // Cgroups V2 this may be empty, as all active controllers use the same hierarchy + Controllers []string + // Path of this control group, relative to the mount point of the cgroupfs representing this specific + // hierarchy + Path string +} + +// parseCgroupString parses each line of the /proc/[pid]/cgroup file +// Line format is hierarchyID:[controller1,controller2]:path +func parseCgroupString(cgroupStr string) (*Cgroup, error) { + var err error + + fields := strings.Split(cgroupStr, ":") + if len(fields) < 3 { + return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + } + + cgroup := &Cgroup{ + Path: fields[2], + Controllers: nil, + } + cgroup.HierarchyID, err = strconv.Atoi(fields[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + if fields[1] != "" { + ssNames := strings.Split(fields[1], ",") + cgroup.Controllers = append(cgroup.Controllers, ssNames...) + } + return cgroup, nil +} + +// parseCgroups reads each line of the /proc/[pid]/cgroup file +func parseCgroups(data []byte) ([]Cgroup, error) { + var cgroups []Cgroup + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseCgroupString(mountString) + if err != nil { + return nil, err + } + cgroups = append(cgroups, *parsedMounts) + } + + err := scanner.Err() + return cgroups, err +} + +// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process +// control hierarchy running on this system. 
On every system (v1 and v2), all hierarchies contain all processes, +// so the len of the returned struct is equal to the number of active hierarchies on this system +func (p Proc) Cgroups() ([]Cgroup, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID)) + if err != nil { + return nil, err + } + return parseCgroups(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 0000000000..6134b3580c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Environ reads process environments from /proc//environ +func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + data, err := util.ReadFileNoStat(p.path("environ")) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 0000000000..a76ca70791 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,133 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +// Regexp variables +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) + rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) +) + +// ProcFDInfo contains represents file descriptor information. +type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. 
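+//
+// A minimal sketch of reading fdinfo for one descriptor of a process p
+// (the descriptor number "0" is illustrative):
+//
+//	info, err := p.FDInfo("0")
+//	if err == nil {
+//		fmt.Println(info.Pos, info.Flags, len(info.InotifyInfos))
+//	}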
+func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. +func parseInotifyInfo(line string) (*InotifyInfo, error) { + m := rInotifyParts.FindStringSubmatch(line) + if len(m) >= 4 { + var mask string + if len(m) == 5 { + mask = m[4] + } + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: mask, + } + return i, nil + } + return nil, errors.New("invalid inode entry: " + line) +} + +// ProcFDInfos represents a list of ProcFDInfo structs. +type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches +func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go new file mode 100644 index 0000000000..776f349717 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// IO creates a new ProcIO instance from a given Proc instance. 
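+//
+// For example, the per-process byte counters could be read roughly as:
+//
+//	pio, err := p.IO()
+//	if err == nil {
+//		fmt.Println(pio.ReadBytes, pio.WriteBytes)
+//	}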
+func (p Proc) IO() (ProcIO, error) { + pio := ProcIO{} + + data, err := util.ReadFileNoStat(p.path("io")) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 0000000000..91ee24df8b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,157 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int64 + // Maximum size of files that the process may create. + FileSize int64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int64 + // Maximum size of the process stack in bytes. + StackSize int64 + // Maximum size of a core file. + CoreFileSize int64 + // Limit of the process's resident set in pages. + ResidentSet int64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int64 +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
+// +// Deprecated: use p.Limits() instead +func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. +func (p Proc) Limits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int64, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return i, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go new file mode 100644 index 0000000000..1d7772d516 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +type ProcMapPermissions struct { + // mapping has the [R]ead flag set + Read bool + // mapping has the [W]rite flag set + Write bool + // mapping has the [X]ecutable flag set + Execute bool + // mapping has the [S]hared flag set + Shared bool + // mapping is marked as [P]rivate (copy on write) + Private bool +} + +// ProcMap contains the process memory-mappings of the process, +// read from /proc/[pid]/maps +type ProcMap struct { + // The start address of current mapping. + StartAddr uintptr + // The end address of the current mapping + EndAddr uintptr + // The permissions for this mapping + Perms *ProcMapPermissions + // The current offset into the file/fd (e.g., shared libs) + Offset int64 + // Device owner of this mapping (major:minor) in Mkdev format. + Dev uint64 + // The inode of the device above + Inode uint64 + // The file or psuedofile (or empty==anonymous) + Pathname string +} + +// parseDevice parses the device token of a line and converts it to a dev_t +// (mkdev) like structure. +func parseDevice(s string) (uint64, error) { + toks := strings.Split(s, ":") + if len(toks) < 2 { + return 0, fmt.Errorf("unexpected number of fields") + } + + major, err := strconv.ParseUint(toks[0], 16, 0) + if err != nil { + return 0, err + } + + minor, err := strconv.ParseUint(toks[1], 16, 0) + if err != nil { + return 0, err + } + + return unix.Mkdev(uint32(major), uint32(minor)), nil +} + +// parseAddress just converts a hex-string to a uintptr +func parseAddress(s string) (uintptr, error) { + a, err := strconv.ParseUint(s, 16, 0) + if err != nil { + return 0, err + } + + return uintptr(a), nil +} + +// parseAddresses parses the start-end address +func parseAddresses(s string) (uintptr, uintptr, error) { + toks := strings.Split(s, "-") + if len(toks) < 2 { + return 0, 0, fmt.Errorf("invalid address") + } + + saddr, err := parseAddress(toks[0]) + if err != nil { + return 0, 0, err + } + + eaddr, err := parseAddress(toks[1]) + if err != nil { + return 0, 0, err + } + + return saddr, eaddr, nil +} + +// parsePermissions parses a token and returns any that are set. +func parsePermissions(s string) (*ProcMapPermissions, error) { + if len(s) < 4 { + return nil, fmt.Errorf("invalid permissions token") + } + + perms := ProcMapPermissions{} + for _, ch := range s { + switch ch { + case 'r': + perms.Read = true + case 'w': + perms.Write = true + case 'x': + perms.Execute = true + case 'p': + perms.Private = true + case 's': + perms.Shared = true + } + } + + return &perms, nil +} + +// parseProcMap will attempt to parse a single line within a proc/[pid]/maps +// buffer. 
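+//
+// A maps line has the general shape (values are illustrative, following the
+// proc(5) format):
+//
+//	00400000-00452000 r-xp 00000000 08:02 173521 /usr/bin/dbus-daemon
+//
+// i.e. address range, permissions, offset, device, inode and an optional
+// pathname, which is the order in which the fields are consumed below.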
+func parseProcMap(text string) (*ProcMap, error) { + fields := strings.Fields(text) + if len(fields) < 5 { + return nil, fmt.Errorf("truncated procmap entry") + } + + saddr, eaddr, err := parseAddresses(fields[0]) + if err != nil { + return nil, err + } + + perms, err := parsePermissions(fields[1]) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(fields[2], 16, 0) + if err != nil { + return nil, err + } + + device, err := parseDevice(fields[3]) + if err != nil { + return nil, err + } + + inode, err := strconv.ParseUint(fields[4], 10, 0) + if err != nil { + return nil, err + } + + pathname := "" + + if len(fields) >= 5 { + pathname = strings.Join(fields[5:], " ") + } + + return &ProcMap{ + StartAddr: saddr, + EndAddr: eaddr, + Perms: perms, + Offset: offset, + Dev: device, + Inode: inode, + Pathname: pathname, + }, nil +} + +// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the +// process. +func (p Proc) ProcMaps() ([]*ProcMap, error) { + file, err := os.Open(p.path("maps")) + if err != nil { + return nil, err + } + defer file.Close() + + maps := []*ProcMap{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + m, err := parseProcMap(scan.Text()) + if err != nil { + return nil, err + } + + maps = append(maps, m) + } + + return maps, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go new file mode 100644 index 0000000000..c66740ff74 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// Namespaces reads from /proc//ns/* to get the namespaces of which the +// process is a member. 
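+//
+// A rough usage sketch:
+//
+//	ns, err := p.Namespaces()
+//	if err == nil {
+//		for name, n := range ns {
+//			fmt.Println(name, n.Type, n.Inode)
+//		}
+//	}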
+func (p Proc) Namespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 0000000000..0d7bee54ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by /proc/pressure/* +// The Avg entries are averages over n seconds, as a percentage +// The Total line is in microseconds +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// Some indicates the share of time in which at least some tasks are stalled +// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". 
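+//
+// For example, memory pressure could be read roughly as follows, assuming a
+// kernel that exposes /proc/pressure:
+//
+//	stats, err := fs.PSIStatsForResource("memory")
+//	if err == nil && stats.Some != nil {
+//		fmt.Println(stats.Some.Avg10, stats.Some.Total)
+//	}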
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) + } + + return parsePSIStats(resource, bytes.NewReader(data)) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go new file mode 100644 index 0000000000..a576a720a4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -0,0 +1,165 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + // match the header line before each mapped zone in /proc/pid/smaps + procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) +) + +type ProcSMapsRollup struct { + // Amount of the mapping that is currently resident in RAM + Rss uint64 + // Process's proportional share of this mapping + Pss uint64 + // Size in bytes of clean shared pages + SharedClean uint64 + // Size in bytes of dirty shared pages + SharedDirty uint64 + // Size in bytes of clean private pages + PrivateClean uint64 + // Size in bytes of dirty private pages + PrivateDirty uint64 + // Amount of memory currently marked as referenced or accessed + Referenced uint64 + // Amount of memory that does not belong to any file + Anonymous uint64 + // Amount would-be-anonymous memory currently on swap + Swap uint64 + // Process's proportional memory on swap + SwapPss uint64 +} + +// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the +// process. +// +// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will +// we read and summed. 
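+//
+// A rough usage sketch:
+//
+//	rollup, err := p.ProcSMapsRollup()
+//	if err == nil {
+//		fmt.Println(rollup.Rss, rollup.Pss)
+//	}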
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { + data, err := util.ReadFileNoStat(p.path("smaps_rollup")) + if err != nil && os.IsNotExist(err) { + return p.procSMapsRollupManual() + } + if err != nil { + return ProcSMapsRollup{}, err + } + + lines := strings.Split(string(data), "\n") + smaps := ProcSMapsRollup{} + + // skip first line which don't contains information we need + lines = lines[1:] + for _, line := range lines { + if line == "" { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +// Read /proc/pid/smaps and do the roll-up in Go code. +func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { + file, err := os.Open(p.path("smaps")) + if err != nil { + return ProcSMapsRollup{}, err + } + defer file.Close() + + smaps := ProcSMapsRollup{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + line := scan.Text() + + if procSMapsHeaderLine.MatchString(line) { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +func (s *ProcSMapsRollup) parseLine(line string) error { + kv := strings.SplitN(line, ":", 2) + if len(kv) != 2 { + fmt.Println(line) + return errors.New("invalid net/dev line, missing colon") + } + + k := kv[0] + if k == "VmFlags" { + return nil + } + + v := strings.TrimSpace(kv[1]) + v = strings.TrimRight(v, " kB") + + vKBytes, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + vBytes := vKBytes * 1024 + + s.addValue(k, v, vKBytes, vBytes) + + return nil +} + +func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Rss": + s.Rss += vUintBytes + case "Pss": + s.Pss += vUintBytes + case "Shared_Clean": + s.SharedClean += vUintBytes + case "Shared_Dirty": + s.SharedDirty += vUintBytes + case "Private_Clean": + s.PrivateClean += vUintBytes + case "Private_Dirty": + s.PrivateDirty += vUintBytes + case "Referenced": + s.Referenced += vUintBytes + case "Anonymous": + s.Anonymous += vUintBytes + case "Swap": + s.Swap += vUintBytes + case "SwapPss": + s.SwapPss += vUintBytes + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 0000000000..4517d2e9dd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,192 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "os" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. 
After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize uint + // Resident set size in pages. + RSS int + + proc fs.FS +} + +// NewStat returns the current status information of the process. +// +// Deprecated: use p.Stat() instead +func (p Proc) NewStat() (ProcStat, error) { + return p.Stat() +} + +// Stat returns the current status information of the process. 
+func (p Proc) Stat() (ProcStat, error) { + data, err := util.ReadFileNoStat(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, proc: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() uint { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + fs := FS{proc: s.proc} + stat, err := fs.Stat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 0000000000..6edd8333b3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Thread group ID. + TGID int + + // Peak virtual memory size. + VmPeak uint64 // nolint:golint + // Virtual memory size. + VmSize uint64 // nolint:golint + // Locked memory size. + VmLck uint64 // nolint:golint + // Pinned memory size. + VmPin uint64 // nolint:golint + // Peak resident set size. + VmHWM uint64 // nolint:golint + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 // nolint:golint + // Size of resident anonymous memory. + RssAnon uint64 // nolint:golint + // Size of resident file mappings. + RssFile uint64 // nolint:golint + // Size of resident shared memory. + RssShmem uint64 // nolint:golint + // Size of data segments. + VmData uint64 // nolint:golint + // Size of stack segments. + VmStk uint64 // nolint:golint + // Size of text segments. 
+ VmExe uint64 // nolint:golint + // Shared library code size. + VmLib uint64 // nolint:golint + // Page table entries size. + VmPTE uint64 // nolint:golint + // Size of second-level page tables. + VmPMD uint64 // nolint:golint + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 // nolint:golint + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. + VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs) + UIDs [4]string + // GIDs of the process (Real, effective, saved set, and filesystem GIDs) + GIDs [4]string +} + +// NewStatus returns the current status information of the process. +func (p Proc) NewStatus() (ProcStatus, error) { + data, err := util.ReadFileNoStat(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Tgid": + s.TGID = int(vUint) + case "Name": + s.Name = vString + case "Uid": + copy(s.UIDs[:], strings.Split(vString, "\t")) + case "Gid": + copy(s.GIDs[:], strings.Split(vString, "\t")) + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 0000000000..a4c4089ac5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,118 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. A fix to the documentation is pending. See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from /proc//schedstat +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from /proc/schedstat +func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return + } + + err = errors.New("could not parse schedstat") + return +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 0000000000..b2a6fc994c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,244 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. 
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func (fs FS) NewStat() (Stat, error) { + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Stat{}, err + } + + stat := Stat{} + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = 
append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err) + } + + return stat, nil +} diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 0000000000..15edc2212b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. +func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + } + + return swap, nil +} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar new file mode 100644 index 0000000000..19ef02b8d4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ttar @@ -0,0 +1,413 @@ +#!/usr/bin/env bash + +# Purpose: plain text tar format +# Limitations: - only suitable for text files, directories, and symlinks +# - stores only filename, content, and mode +# - not designed for untrusted input +# +# Note: must work with bash version 3.2 (macOS) + +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit -o nounset + +# Sanitize environment (for instance, standard sorting of glob matches) +export LC_ALL=C + +path="" +CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." + exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ + +function usage { + bname=$(basename "$0") + cat << USAGE +Usage: $bname [-C ] -c -f (create archive) + $bname -t -f (list archive contents) + $bname [-C ] -x -f (extract archive) + +Options: + -C (change directory) + -v (verbose) + --recursive-unlink (recursively delete existing directory if path + collides with file or directory to extract) + +Example: Change to sysfs directory, create ttar file from fixtures directory + $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ +USAGE +exit "$1" +} + +function vecho { + if [ "${VERBOSE:-}" == "yes" ]; then + echo >&7 "$@" + fi +} + +function set_cmd { + if [ -n "$CMD" ]; then + echo "ERROR: more than one command given" + echo + usage 2 + fi + CMD=$1 +} + +unset VERBOSE +unset RECURSIVE_UNLINK + +while getopts :cf:-:htxvC: opt; do + case $opt in + c) + set_cmd "create" + ;; + f) + ARCHIVE=$OPTARG + ;; + h) + usage 0 + ;; + t) + set_cmd "list" + ;; + x) + set_cmd "extract" + ;; + v) + VERBOSE=yes + exec 7>&1 + ;; + C) + CDIR=$OPTARG + ;; + -) + case $OPTARG in + recursive-unlink) + RECURSIVE_UNLINK="yes" + ;; + *) + echo -e "Error: invalid option -$OPTARG" + echo + usage 1 + ;; + esac + ;; + *) + echo >&2 "ERROR: invalid option -$OPTARG" + echo + usage 1 + ;; + esac +done + +# Remove processed options from arguments +shift $(( OPTIND - 1 )); + +if [ "${CMD:-}" == "" ]; then + echo >&2 "ERROR: no command given" + echo + usage 1 +elif [ "${ARCHIVE:-}" == "" ]; then + echo >&2 "ERROR: no archive name given" + echo + usage 1 +fi + +function list { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! 
-e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). + echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -L "$path" ]; then + rm "$path" + elif [ -d "$path" ]; then + if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then + rm -r "$path" + else + # Safe because symlinks to directories are dealt with above + rmdir "$path" + fi + elif [ -e "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. 
+ touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo "SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." + echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 0000000000..cb13891414 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// The VM interface is described at +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// Each setting is exposed as a single file. +// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array +// and numa_zonelist_order (deprecated) which is a string +type VM struct { + AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes + BlockDump *int64 // /proc/sys/vm/block_dump + CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed + DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes + DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio + DirtyBytes *int64 // /proc/sys/vm/dirty_bytes + DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs + DirtyRatio *int64 // /proc/sys/vm/dirty_ratio + DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds + DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs + DropCaches *int64 // /proc/sys/vm/drop_caches + ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold + HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group + LaptopMode *int64 // /proc/sys/vm/laptop_mode + LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout + LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio + MaxMapCount *int64 // /proc/sys/vm/max_map_count + MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill + MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery + MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes + MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio + MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio + MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr + NrHugepages *int64 // /proc/sys/vm/nr_hugepages + NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy + NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages + NumaStat *int64 // /proc/sys/vm/numa_stat + NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order + OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks + OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task + OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes + OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory + OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio + PageCluster *int64 // /proc/sys/vm/page-cluster + PanicOnOom *int64 // /proc/sys/vm/panic_on_oom + PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction + StatInterval *int64 // /proc/sys/vm/stat_interval + Swappiness *int64 // /proc/sys/vm/swappiness + UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes + VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure + WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor + WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor + ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode +} + +// VM reads the VM statistics from the 
specified `proc` filesystem. +func (fs FS) VM() (*VM, error) { + path := fs.proc.Path("sys/vm") + file, err := os.Stat(path) + if err != nil { + return nil, err + } + if !file.Mode().IsDir() { + return nil, fmt.Errorf("%s is not a directory", path) + } + + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var vm VM + for _, f := range files { + if f.IsDir() { + continue + } + + name := filepath.Join(path, f.Name()) + // ignore errors on read, as there are some write only + // in /proc/sys/vm + value, err := util.SysReadFile(name) + if err != nil { + continue + } + vp := util.NewValueParser(value) + + switch f.Name() { + case "admin_reserve_kbytes": + vm.AdminReserveKbytes = vp.PInt64() + case "block_dump": + vm.BlockDump = vp.PInt64() + case "compact_unevictable_allowed": + vm.CompactUnevictableAllowed = vp.PInt64() + case "dirty_background_bytes": + vm.DirtyBackgroundBytes = vp.PInt64() + case "dirty_background_ratio": + vm.DirtyBackgroundRatio = vp.PInt64() + case "dirty_bytes": + vm.DirtyBytes = vp.PInt64() + case "dirty_expire_centisecs": + vm.DirtyExpireCentisecs = vp.PInt64() + case "dirty_ratio": + vm.DirtyRatio = vp.PInt64() + case "dirtytime_expire_seconds": + vm.DirtytimeExpireSeconds = vp.PInt64() + case "dirty_writeback_centisecs": + vm.DirtyWritebackCentisecs = vp.PInt64() + case "drop_caches": + vm.DropCaches = vp.PInt64() + case "extfrag_threshold": + vm.ExtfragThreshold = vp.PInt64() + case "hugetlb_shm_group": + vm.HugetlbShmGroup = vp.PInt64() + case "laptop_mode": + vm.LaptopMode = vp.PInt64() + case "legacy_va_layout": + vm.LegacyVaLayout = vp.PInt64() + case "lowmem_reserve_ratio": + stringSlice := strings.Fields(value) + pint64Slice := make([]*int64, 0, len(stringSlice)) + for _, value := range stringSlice { + vp := util.NewValueParser(value) + pint64Slice = append(pint64Slice, vp.PInt64()) + } + vm.LowmemReserveRatio = pint64Slice + case "max_map_count": + vm.MaxMapCount = vp.PInt64() + case "memory_failure_early_kill": + vm.MemoryFailureEarlyKill = vp.PInt64() + case "memory_failure_recovery": + vm.MemoryFailureRecovery = vp.PInt64() + case "min_free_kbytes": + vm.MinFreeKbytes = vp.PInt64() + case "min_slab_ratio": + vm.MinSlabRatio = vp.PInt64() + case "min_unmapped_ratio": + vm.MinUnmappedRatio = vp.PInt64() + case "mmap_min_addr": + vm.MmapMinAddr = vp.PInt64() + case "nr_hugepages": + vm.NrHugepages = vp.PInt64() + case "nr_hugepages_mempolicy": + vm.NrHugepagesMempolicy = vp.PInt64() + case "nr_overcommit_hugepages": + vm.NrOvercommitHugepages = vp.PInt64() + case "numa_stat": + vm.NumaStat = vp.PInt64() + case "numa_zonelist_order": + vm.NumaZonelistOrder = value + case "oom_dump_tasks": + vm.OomDumpTasks = vp.PInt64() + case "oom_kill_allocating_task": + vm.OomKillAllocatingTask = vp.PInt64() + case "overcommit_kbytes": + vm.OvercommitKbytes = vp.PInt64() + case "overcommit_memory": + vm.OvercommitMemory = vp.PInt64() + case "overcommit_ratio": + vm.OvercommitRatio = vp.PInt64() + case "page-cluster": + vm.PageCluster = vp.PInt64() + case "panic_on_oom": + vm.PanicOnOom = vp.PInt64() + case "percpu_pagelist_fraction": + vm.PercpuPagelistFraction = vp.PInt64() + case "stat_interval": + vm.StatInterval = vp.PInt64() + case "swappiness": + vm.Swappiness = vp.PInt64() + case "user_reserve_kbytes": + vm.UserReserveKbytes = vp.PInt64() + case "vfs_cache_pressure": + vm.VfsCachePressure = vp.PInt64() + case "watermark_boost_factor": + vm.WatermarkBoostFactor = vp.PInt64() + case "watermark_scale_factor": + vm.WatermarkScaleFactor = 
vp.PInt64() + case "zone_reclaim_mode": + vm.ZoneReclaimMode = vp.PInt64() + } + if err := vp.Err(); err != nil { + return nil, err + } + } + + return &vm, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 0000000000..30aa417d53 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + XfrmFwdHdrError int + XfrmOutStateInvalid int + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldn't parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go new file mode 100644 index 0000000000..e941503d5c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Zoneinfo holds info parsed from /proc/zoneinfo. 
+type Zoneinfo struct { + Node string + Zone string + NrFreePages *int64 + Min *int64 + Low *int64 + High *int64 + Scanned *int64 + Spanned *int64 + Present *int64 + Managed *int64 + NrActiveAnon *int64 + NrInactiveAnon *int64 + NrIsolatedAnon *int64 + NrAnonPages *int64 + NrAnonTransparentHugepages *int64 + NrActiveFile *int64 + NrInactiveFile *int64 + NrIsolatedFile *int64 + NrFilePages *int64 + NrSlabReclaimable *int64 + NrSlabUnreclaimable *int64 + NrMlockStack *int64 + NrKernelStack *int64 + NrMapped *int64 + NrDirty *int64 + NrWriteback *int64 + NrUnevictable *int64 + NrShmem *int64 + NrDirtied *int64 + NrWritten *int64 + NumaHit *int64 + NumaMiss *int64 + NumaForeign *int64 + NumaInterleave *int64 + NumaLocal *int64 + NumaOther *int64 + Protection []*int64 +} + +var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) + +// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of +// structs containing the relevant info. More information available here: +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +func (fs FS) Zoneinfo() ([]Zoneinfo, error) { + data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + if err != nil { + return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + } + zoneinfo, err := parseZoneinfo(data) + if err != nil { + return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + } + return zoneinfo, nil +} + +func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { + + zoneinfo := []Zoneinfo{} + + zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) + for _, block := range zoneinfoBlocks { + var zoneinfoElement Zoneinfo + lines := strings.Split(string(block), "\n") + for _, line := range lines { + + if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { + zoneinfoElement.Node = nodeZone[1] + zoneinfoElement.Zone = nodeZone[2] + continue + } + if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { + zoneinfoElement.Zone = "" + continue + } + parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 2 { + continue + } + vp := util.NewValueParser(parts[1]) + switch parts[0] { + case "nr_free_pages": + zoneinfoElement.NrFreePages = vp.PInt64() + case "min": + zoneinfoElement.Min = vp.PInt64() + case "low": + zoneinfoElement.Low = vp.PInt64() + case "high": + zoneinfoElement.High = vp.PInt64() + case "scanned": + zoneinfoElement.Scanned = vp.PInt64() + case "spanned": + zoneinfoElement.Spanned = vp.PInt64() + case "present": + zoneinfoElement.Present = vp.PInt64() + case "managed": + zoneinfoElement.Managed = vp.PInt64() + case "nr_active_anon": + zoneinfoElement.NrActiveAnon = vp.PInt64() + case "nr_inactive_anon": + zoneinfoElement.NrInactiveAnon = vp.PInt64() + case "nr_isolated_anon": + zoneinfoElement.NrIsolatedAnon = vp.PInt64() + case "nr_anon_pages": + zoneinfoElement.NrAnonPages = vp.PInt64() + case "nr_anon_transparent_hugepages": + zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() + case "nr_active_file": + zoneinfoElement.NrActiveFile = vp.PInt64() + case "nr_inactive_file": + zoneinfoElement.NrInactiveFile = vp.PInt64() + case "nr_isolated_file": + zoneinfoElement.NrIsolatedFile = vp.PInt64() + case "nr_file_pages": + zoneinfoElement.NrFilePages = vp.PInt64() + case "nr_slab_reclaimable": + zoneinfoElement.NrSlabReclaimable = vp.PInt64() + case "nr_slab_unreclaimable": + zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() + case "nr_mlock_stack": + zoneinfoElement.NrMlockStack = vp.PInt64() + case "nr_kernel_stack": 
+ zoneinfoElement.NrKernelStack = vp.PInt64() + case "nr_mapped": + zoneinfoElement.NrMapped = vp.PInt64() + case "nr_dirty": + zoneinfoElement.NrDirty = vp.PInt64() + case "nr_writeback": + zoneinfoElement.NrWriteback = vp.PInt64() + case "nr_unevictable": + zoneinfoElement.NrUnevictable = vp.PInt64() + case "nr_shmem": + zoneinfoElement.NrShmem = vp.PInt64() + case "nr_dirtied": + zoneinfoElement.NrDirtied = vp.PInt64() + case "nr_written": + zoneinfoElement.NrWritten = vp.PInt64() + case "numa_hit": + zoneinfoElement.NumaHit = vp.PInt64() + case "numa_miss": + zoneinfoElement.NumaMiss = vp.PInt64() + case "numa_foreign": + zoneinfoElement.NumaForeign = vp.PInt64() + case "numa_interleave": + zoneinfoElement.NumaInterleave = vp.PInt64() + case "numa_local": + zoneinfoElement.NumaLocal = vp.PInt64() + case "numa_other": + zoneinfoElement.NumaOther = vp.PInt64() + case "protection:": + protectionParts := strings.Split(line, ":") + protectionValues := strings.Replace(protectionParts[1], "(", "", 1) + protectionValues = strings.Replace(protectionValues, ")", "", 1) + protectionValues = strings.TrimSpace(protectionValues) + protectionStringMap := strings.Split(protectionValues, ", ") + val, err := util.ParsePInt64s(protectionStringMap) + if err == nil { + zoneinfoElement.Protection = val + } + } + + } + + zoneinfo = append(zoneinfo, zoneinfoElement) + } + return zoneinfo, nil +} diff --git a/vendor/k8s.io/component-base/metrics/OWNERS b/vendor/k8s.io/component-base/metrics/OWNERS new file mode 100644 index 0000000000..a9c3172f39 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-instrumentation-approvers +- logicalhan +- RainbowMango +reviewers: +- sig-instrumentation-reviewers +labels: +- sig/instrumentation diff --git a/vendor/k8s.io/component-base/metrics/collector.go b/vendor/k8s.io/component-base/metrics/collector.go new file mode 100644 index 0000000000..090342e162 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/collector.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// StableCollector extends the prometheus.Collector interface to allow customization of the +// metric registration process, it's especially intend to be used in scenario of custom collector. +type StableCollector interface { + prometheus.Collector + + // DescribeWithStability sends the super-set of all possible metrics.Desc collected + // by this StableCollector to the provided channel. + DescribeWithStability(chan<- *Desc) + + // CollectWithStability sends each collected metrics.Metric via the provide channel. + CollectWithStability(chan<- Metric) + + // Create will initialize all Desc and it intends to be called by registry. 
+ Create(version *semver.Version, self StableCollector) bool + + // ClearState will clear all the states marked by Create. + ClearState() + + // HiddenMetrics tells the list of hidden metrics with fqName. + HiddenMetrics() []string +} + +// BaseStableCollector which implements almost all of the methods defined by StableCollector +// is a convenient assistant for custom collectors. +// It is recommend that inherit BaseStableCollector when implementing custom collectors. +type BaseStableCollector struct { + descriptors map[string]*Desc // stores all descriptors by pair, these are collected from DescribeWithStability(). + registrable map[string]*Desc // stores registrable descriptors by pair, is a subset of descriptors. + hidden map[string]*Desc // stores hidden descriptors by pair, is a subset of descriptors. + self StableCollector +} + +// DescribeWithStability sends all descriptors to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) { + panic(fmt.Errorf("custom collector should over-write DescribeWithStability method")) +} + +// Describe sends all descriptors to the provided channel. +// It intend to be called by prometheus registry. +func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) { + for _, d := range bsc.registrable { + ch <- d.toPrometheusDesc() + } +} + +// CollectWithStability sends all metrics to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) { + panic(fmt.Errorf("custom collector should over-write CollectWithStability method")) +} + +// Collect is called by the Prometheus registry when collecting metrics. +func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) { + mch := make(chan Metric) + + go func() { + bsc.self.CollectWithStability(mch) + close(mch) + }() + + for m := range mch { + // nil Metric usually means hidden metrics + if m == nil { + continue + } + + ch <- prometheus.Metric(m) + } +} + +func (bsc *BaseStableCollector) add(d *Desc) { + if len(d.fqName) == 0 { + panic("nameless metrics will be not allowed") + } + + if bsc.descriptors == nil { + bsc.descriptors = make(map[string]*Desc) + } + + if _, exist := bsc.descriptors[d.fqName]; exist { + panic(fmt.Sprintf("duplicate metrics (%s) will be not allowed", d.fqName)) + } + + bsc.descriptors[d.fqName] = d +} + +// Init intends to be called by registry. +func (bsc *BaseStableCollector) init(self StableCollector) { + bsc.self = self + + dch := make(chan *Desc) + + // collect all possible descriptions from custom side + go func() { + bsc.self.DescribeWithStability(dch) + close(dch) + }() + + for d := range dch { + bsc.add(d) + } +} + +func (bsc *BaseStableCollector) trackRegistrableDescriptor(d *Desc) { + if bsc.registrable == nil { + bsc.registrable = make(map[string]*Desc) + } + + bsc.registrable[d.fqName] = d +} + +func (bsc *BaseStableCollector) trackHiddenDescriptor(d *Desc) { + if bsc.hidden == nil { + bsc.hidden = make(map[string]*Desc) + } + + bsc.hidden[d.fqName] = d +} + +// Create intends to be called by registry. +// Create will return true as long as there is one or more metrics not be hidden. +// Otherwise return false, that means the whole collector will be ignored by registry. 
+func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool { + bsc.init(self) + + for _, d := range bsc.descriptors { + d.create(version) + if d.IsHidden() { + bsc.trackHiddenDescriptor(d) + } else { + bsc.trackRegistrableDescriptor(d) + } + } + + if len(bsc.registrable) > 0 { + return true + } + + return false +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (bsc *BaseStableCollector) ClearState() { + for _, d := range bsc.descriptors { + d.ClearState() + } + + bsc.descriptors = nil + bsc.registrable = nil + bsc.hidden = nil + bsc.self = nil +} + +// HiddenMetrics tells the list of hidden metrics with fqName. +func (bsc *BaseStableCollector) HiddenMetrics() (fqNames []string) { + for i := range bsc.hidden { + fqNames = append(fqNames, bsc.hidden[i].fqName) + } + return +} + +// Check if our BaseStableCollector implements necessary interface +var _ StableCollector = &BaseStableCollector{} diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go new file mode 100644 index 0000000000..de69431095 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/counter.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// Counter is our internal representation for our wrapping struct around prometheus +// counters. Counter implements both kubeCollector and CounterMetric. +type Counter struct { + CounterMetric + *CounterOpts + lazyMetric + selfCollector +} + +// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounter(opts *CounterOpts) *Counter { + opts.StabilityLevel.setDefaults() + + kc := &Counter{ + CounterOpts: opts, + lazyMetric: lazyMetric{}, + } + kc.setPrometheusCounter(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +// Reset resets the underlying prometheus Counter to start counting from 0 again +func (c *Counter) Reset() { + if !c.IsCreated() { + return + } + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement. +func (c *Counter) setPrometheusCounter(counter prometheus.Counter) { + c.CounterMetric = counter + c.initSelfCollection(counter) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (c *Counter) DeprecatedVersion() *semver.Version { + return parseSemver(c.CounterOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Counter. Until this method is called +// the underlying counter is a no-op. 
+func (c *Counter) initializeMetric() { + c.CounterOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus counter. + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method +// is called the underlying counter is a no-op. +func (c *Counter) initializeDeprecatedMetric() { + c.CounterOpts.markDeprecated() + c.initializeMetric() +} + +// CounterVec is the internal representation of our wrapping struct around prometheus +// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. +type CounterVec struct { + *prometheus.CounterVec + *CounterOpts + lazyMetric + originalLabels []string +} + +// NewCounterVec returns an object which satisfies the kubeCollector and CounterVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { + opts.StabilityLevel.setDefaults() + + cv := &CounterVec{ + CounterVec: noopCounterVec, + CounterOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + cv.lazyInit(cv, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *CounterVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.CounterOpts.DeprecatedVersion) + +} + +// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeMetric() { + v.CounterOpts.annotateStabilityLevel() + v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeDeprecatedMetric() { + v.CounterOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197 + +// WithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created IFF the counterVec +// has been registered to a metrics registry. +func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + return v.CounterVec.WithLabelValues(lvs...) +} + +// With returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created IFF the counterVec has +// been registered to a metrics registry. 
+func (v *CounterVec) With(labels map[string]string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + return v.CounterVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *CounterVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.CounterVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *CounterVec) Reset() { + if !v.IsCreated() { + return + } + + v.CounterVec.Reset() +} diff --git a/vendor/k8s.io/component-base/metrics/desc.go b/vendor/k8s.io/component-base/metrics/desc.go new file mode 100644 index 0000000000..e53e3ee189 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/desc.go @@ -0,0 +1,225 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog/v2" +) + +// Desc is a prometheus.Desc extension. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabels is the label names. Their label values are variable. + constLabels Labels + // variableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + + // promDesc is the descriptor used by every Prometheus Metric. + promDesc *prometheus.Desc + annotatedHelp string + + // stabilityLevel represents the API guarantees for a given defined metric. + stabilityLevel StabilityLevel + // deprecatedVersion represents in which version this metric be deprecated. + deprecatedVersion string + + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + deprecateOnce sync.Once + hideOnce sync.Once + annotateOnce sync.Once +} + +// NewDesc extends prometheus.NewDesc with stability support. +// +// The stabilityLevel should be valid stability label, such as "metrics.ALPHA" +// and "metrics.STABLE"(Maybe "metrics.BETA" in future). Default value "metrics.ALPHA" +// will be used in case of empty or invalid stability label. +// +// The deprecatedVersion represents in which version this Metric be deprecated. +// The deprecation policy outlined by the control plane metrics stability KEP. 
+func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels, + stabilityLevel StabilityLevel, deprecatedVersion string) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + annotatedHelp: help, + variableLabels: variableLabels, + constLabels: constLabels, + stabilityLevel: stabilityLevel, + deprecatedVersion: deprecatedVersion, + } + d.stabilityLevel.setDefaults() + + return d +} + +// String formats the Desc as a string. +// The stability metadata maybe annotated in 'HELP' section if called after registry, +// otherwise not. +// e.g. "Desc{fqName: "normal_stable_descriptor", help: "[STABLE] this is a stable descriptor", constLabels: {}, variableLabels: []}" +func (d *Desc) String() string { + if d.isCreated { + return d.promDesc.String() + } + + return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String() +} + +// toPrometheusDesc transform self to prometheus.Desc +func (d *Desc) toPrometheusDesc() *prometheus.Desc { + return d.promDesc +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (d *Desc) DeprecatedVersion() *semver.Version { + return parseSemver(d.deprecatedVersion) + +} + +func (d *Desc) determineDeprecationStatus(version semver.Version) { + selfVersion := d.DeprecatedVersion() + if selfVersion == nil { + return + } + d.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + d.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName) + d.isHidden = true + } + }) +} + +// IsHidden returns if metric will be hidden +func (d *Desc) IsHidden() bool { + return d.isHidden +} + +// IsDeprecated returns if metric has been deprecated +func (d *Desc) IsDeprecated() bool { + return d.isDeprecated +} + +// IsCreated returns if metric has been created. +func (d *Desc) IsCreated() bool { + d.createLock.RLock() + defer d.createLock.RUnlock() + + return d.isCreated +} + +// create forces the initialization of Desc which has been deferred until +// the point at which this method is invoked. This method will determine whether +// the Desc is deprecated or hidden, no-opting if the Desc should be considered +// hidden. Furthermore, this function no-opts and returns true if Desc is already +// created. +func (d *Desc) create(version *semver.Version) bool { + if version != nil { + d.determineDeprecationStatus(*version) + } + + // let's not create if this metric is slated to be hidden + if d.IsHidden() { + return false + } + d.createOnce.Do(func() { + d.createLock.Lock() + defer d.createLock.Unlock() + + d.isCreated = true + if d.IsDeprecated() { + d.initializeDeprecatedDesc() + } else { + d.initialize() + } + }) + return d.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. 
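A small sketch of building a Desc with NewDesc as declared above, as a custom StableCollector would (the metric name and labels are illustrative; create() is driven by the registry, so the example only constructs the descriptor and prints it):

```go
package main

import (
	"fmt"

	"k8s.io/component-base/metrics"
)

func main() {
	d := metrics.NewDesc(
		"example_queue_depth",              // fqName
		"Current depth of the work queue.", // help
		[]string{"queue"},                  // variable labels
		nil,                                // const labels
		metrics.STABLE,                     // stability level
		"",                                 // deprecated version (none)
	)
	// Before create() runs, String() falls back to a plain prometheus.Desc.
	fmt.Println(d.String())
}
```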
+func (d *Desc) ClearState() { + d.isDeprecated = false + d.isHidden = false + d.isCreated = false + + d.markDeprecationOnce = *new(sync.Once) + d.createOnce = *new(sync.Once) + d.deprecateOnce = *new(sync.Once) + d.hideOnce = *new(sync.Once) + d.annotateOnce = *new(sync.Once) + + d.annotatedHelp = d.help + d.promDesc = nil +} + +func (d *Desc) markDeprecated() { + d.deprecateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.annotatedHelp) + }) +} + +func (d *Desc) annotateStabilityLevel() { + d.annotateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.annotatedHelp) + }) +} + +func (d *Desc) initialize() { + d.annotateStabilityLevel() + + // this actually creates the underlying prometheus desc. + d.promDesc = prometheus.NewDesc(d.fqName, d.annotatedHelp, d.variableLabels, prometheus.Labels(d.constLabels)) +} + +func (d *Desc) initializeDeprecatedDesc() { + d.markDeprecated() + d.initialize() +} + +// GetRawDesc will returns a new *Desc with original parameters provided to NewDesc(). +// +// It will be useful in testing scenario that the same Desc be registered to different registry. +// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) +// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') +func (d *Desc) GetRawDesc() *Desc { + return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion) +} diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go new file mode 100644 index 0000000000..7b4469fcb8 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/gauge.go @@ -0,0 +1,193 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/component-base/version" +) + +// Gauge is our internal representation for our wrapping struct around prometheus +// gauges. kubeGauge implements both kubeCollector and KubeGauge. +type Gauge struct { + GaugeMetric + *GaugeOpts + lazyMetric + selfCollector +} + +// NewGauge returns an object which satisfies the kubeCollector and KubeGauge interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGauge(opts *GaugeOpts) *Gauge { + opts.StabilityLevel.setDefaults() + + kc := &Gauge{ + GaugeOpts: opts, + lazyMetric: lazyMetric{}, + } + kc.setPrometheusGauge(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +// setPrometheusGauge sets the underlying KubeGauge object, i.e. the thing that does the measurement. 
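A usage sketch for the Gauge wrapper above (the metric name and the global-registry wiring are assumptions):

```go
package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var queueDepth = metrics.NewGauge(&metrics.GaugeOpts{
	Name:           "example_queue_depth",
	Help:           "Current depth of the work queue.",
	StabilityLevel: metrics.ALPHA,
})

func main() {
	legacyregistry.MustRegister(queueDepth)
	queueDepth.Set(42) // GaugeMetric also supports Inc/Dec/Add/Sub
}
```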
+func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) { + g.GaugeMetric = gauge + g.initSelfCollection(gauge) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (g *Gauge) DeprecatedVersion() *semver.Version { + return parseSemver(g.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Gauge. Until this method is called +// the underlying gauge is a no-op. +func (g *Gauge) initializeMetric() { + g.GaugeOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method +// is called the underlying gauge is a no-op. +func (g *Gauge) initializeDeprecatedMetric() { + g.GaugeOpts.markDeprecated() + g.initializeMetric() +} + +// GaugeVec is the internal representation of our wrapping struct around prometheus +// gaugeVecs. kubeGaugeVec implements both kubeCollector and KubeGaugeVec. +type GaugeVec struct { + *prometheus.GaugeVec + *GaugeOpts + lazyMetric + originalLabels []string +} + +// NewGaugeVec returns an object which satisfies the kubeCollector and KubeGaugeVec interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { + opts.StabilityLevel.setDefaults() + + cv := &GaugeVec{ + GaugeVec: noopGaugeVec, + GaugeOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + cv.lazyInit(cv, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *GaugeVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeMetric() { + v.GaugeOpts.annotateStabilityLevel() + v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeDeprecatedMetric() { + v.GaugeOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 + +// WithLabelValues returns the GaugeMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec +// has been registered to a metrics registry. +func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + return v.GaugeVec.WithLabelValues(lvs...) 
+} + +// With returns the GaugeMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has +// been registered to a metrics registry. +func (v *GaugeVec) With(labels map[string]string) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + return v.GaugeVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *GaugeVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.GaugeVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *GaugeVec) Reset() { + if !v.IsCreated() { + return + } + + v.GaugeVec.Reset() +} + +func newGaugeFunc(opts GaugeOpts, function func() float64, v semver.Version) GaugeFunc { + g := NewGauge(&opts) + + if !g.Create(&v) { + return nil + } + + return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function) +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + v := parseVersion(version.Get()) + + return newGaugeFunc(opts, function, v) +} diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go new file mode 100644 index 0000000000..d233e12b92 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/histogram.go @@ -0,0 +1,177 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// DefBuckets is a wrapper for prometheus.DefBuckets +var DefBuckets = prometheus.DefBuckets + +// LinearBuckets is a wrapper for prometheus.LinearBuckets. +func LinearBuckets(start, width float64, count int) []float64 { + return prometheus.LinearBuckets(start, width, count) +} + +// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets. 
+func ExponentialBuckets(start, factor float64, count int) []float64 { + return prometheus.ExponentialBuckets(start, factor, count) +} + +// Histogram is our internal representation for our wrapping struct around prometheus +// histograms. Summary implements both kubeCollector and ObserverMetric +type Histogram struct { + ObserverMetric + *HistogramOpts + lazyMetric + selfCollector +} + +// NewHistogram returns an object which is Histogram-like. However, nothing +// will be measured until the histogram is registered somewhere. +func NewHistogram(opts *HistogramOpts) *Histogram { + opts.StabilityLevel.setDefaults() + + h := &Histogram{ + HistogramOpts: opts, + lazyMetric: lazyMetric{}, + } + h.setPrometheusHistogram(noopMetric{}) + h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return h +} + +// setPrometheusHistogram sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) { + h.ObserverMetric = histogram + h.initSelfCollection(histogram) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (h *Histogram) DeprecatedVersion() *semver.Version { + return parseSemver(h.HistogramOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Histogram object instantiation +// and stores a reference to it +func (h *Histogram) initializeMetric() { + h.HistogramOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts())) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation +// but modifies the Help description prior to object instantiation. +func (h *Histogram) initializeDeprecatedMetric() { + h.HistogramOpts.markDeprecated() + h.initializeMetric() +} + +// HistogramVec is the internal representation of our wrapping struct around prometheus +// histogramVecs. +type HistogramVec struct { + *prometheus.HistogramVec + *HistogramOpts + lazyMetric + originalLabels []string +} + +// NewHistogramVec returns an object which satisfies kubeCollector and wraps the +// prometheus.HistogramVec object. However, the object returned will not measure +// anything unless the collector is first registered, since the metric is lazily instantiated. +func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec { + opts.StabilityLevel.setDefaults() + + v := &HistogramVec{ + HistogramVec: noopHistogramVec, + HistogramOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + v.lazyInit(v, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *HistogramVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.HistogramOpts.DeprecatedVersion) +} + +func (v *HistogramVec) initializeMetric() { + v.HistogramOpts.annotateStabilityLevel() + v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels) +} + +func (v *HistogramVec) initializeDeprecatedMetric() { + v.HistogramOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. 
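A sketch combining the bucket helpers and the HistogramVec wrapper above (metric name, label, bucket layout, and the global-registry wiring are illustrative):

```go
package main

import (
	"time"

	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var syncDuration = metrics.NewHistogramVec(
	&metrics.HistogramOpts{
		Name: "example_sync_duration_seconds",
		Help: "Sync duration broken out by result.",
		// 14 buckets starting at 1ms, doubling each step (~8.2s at the top).
		Buckets:        metrics.ExponentialBuckets(0.001, 2, 14),
		StabilityLevel: metrics.ALPHA,
	},
	[]string{"result"},
)

func main() {
	// Before registration the vector is a no-op; observations are dropped.
	legacyregistry.MustRegister(syncDuration)

	start := time.Now()
	// ... work being timed ...
	syncDuration.WithLabelValues("success").Observe(time.Since(start).Seconds())
}
```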
+// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470 + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec +// has been registered to a metrics registry. +func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.HistogramVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has +// been registered to a metrics registry. +func (v *HistogramVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.HistogramVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *HistogramVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.HistogramVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *HistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.HistogramVec.Reset() +} diff --git a/vendor/k8s.io/component-base/metrics/http.go b/vendor/k8s.io/component-base/metrics/http.go new file mode 100644 index 0000000000..3394a8f711 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/http.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "io" + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError promhttp.HandlerErrorHandling = iota + + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. 
In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts promhttp.HandlerOpts + +func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts { + return promhttp.HandlerOpts(*ho) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler { + return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) +} + +// HandlerWithReset return an http.Handler with Reset +func HandlerWithReset(reg KubeRegistry, opts HandlerOpts) http.Handler { + defaultHandler := promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodDelete { + reg.Reset() + io.WriteString(w, "metrics reset\n") + return + } + defaultHandler.ServeHTTP(w, r) + }) +} diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go new file mode 100644 index 0000000000..11af3ae424 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Labels represents a collection of label name -> value mappings. +type Labels prometheus.Labels diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go new file mode 100644 index 0000000000..2605103c10 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package legacyregistry + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "k8s.io/component-base/metrics" +) + +var ( + defaultRegistry = metrics.NewKubeRegistry() + // DefaultGatherer exposes the global registry gatherer + DefaultGatherer metrics.Gatherer = defaultRegistry + // Reset calls reset on the global registry + Reset = defaultRegistry.Reset + // MustRegister registers registerable metrics but uses the global registry. + MustRegister = defaultRegistry.MustRegister + // RawMustRegister registers prometheus collectors but uses the global registry, this + // bypasses the metric stability framework + // + // Deprecated + RawMustRegister = defaultRegistry.RawMustRegister + + // Register registers a collectable metric but uses the global registry + Register = defaultRegistry.Register +) + +func init() { + RawMustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) + RawMustRegister(prometheus.NewGoCollector()) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead. +func Handler() http.Handler { + return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{})) +} + +// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes +// registry reset if the http method is DELETE. +func HandlerWithReset() http.Handler { + return promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, + metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{})) +} + +// CustomRegister registers a custom collector but uses the global registry. +func CustomRegister(c metrics.StableCollector) error { + err := defaultRegistry.CustomRegister(c) + + //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13) + _ = prometheus.Register(c) + + return err +} + +// CustomMustRegister registers custom collectors but uses the global registry. +func CustomMustRegister(cs ...metrics.StableCollector) { + defaultRegistry.CustomMustRegister(cs...) + + for _, c := range cs { + prometheus.MustRegister(c) + } +} diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go new file mode 100644 index 0000000000..bb1f696270 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -0,0 +1,228 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
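A minimal sketch of serving the global registry above over HTTP (the mux, path, and port are illustrative):

```go
package main

import (
	"net/http"

	"k8s.io/component-base/metrics/legacyregistry"
)

func main() {
	mux := http.NewServeMux()
	// Handler() serves the default registry; HandlerWithReset() additionally
	// resets resettable metrics when the request method is DELETE.
	mux.Handle("/metrics", legacyregistry.HandlerWithReset())
	_ = http.ListenAndServe(":8080", mux)
}
```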
+*/ + +package metrics + +import ( + "sync" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + "k8s.io/klog/v2" +) + +/* +kubeCollector extends the prometheus.Collector interface to allow customization of the metric +registration process. Defer metric initialization until Create() is called, which then +delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric +method call depending on whether the metric is deprecated or not. +*/ +type kubeCollector interface { + Collector + lazyKubeMetric + DeprecatedVersion() *semver.Version + // Each collector metric should provide an initialization function + // for both deprecated and non-deprecated variants of a metric. This + // is necessary since metric instantiation will be deferred + // until the metric is actually registered somewhere. + initializeMetric() + initializeDeprecatedMetric() +} + +/* +lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected +to lazily instantiate metrics (i.e defer metric instantiation until when +the Create() function is explicitly called). +*/ +type lazyKubeMetric interface { + Create(*semver.Version) bool + IsCreated() bool + IsHidden() bool + IsDeprecated() bool +} + +/* +lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric +registration time before instantiation. Add it as an anonymous field to a struct that +implements kubeCollector to get deferred registration behavior. You must call lazyInit +with the kubeCollector itself as an argument. +*/ +type lazyMetric struct { + fqName string + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + self kubeCollector +} + +func (r *lazyMetric) IsCreated() bool { + r.createLock.RLock() + defer r.createLock.RUnlock() + return r.isCreated +} + +// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed +// to allow lazy initialization for. It should be invoked in the factory function which creates new +// kubeCollector type objects. +func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) { + r.fqName = fqName + r.self = self +} + +// determineDeprecationStatus figures out whether the lazy metric should be deprecated or not. +// This method takes a Version argument which should be the version of the binary in which +// this code is currently being executed. +func (r *lazyMetric) determineDeprecationStatus(version semver.Version) { + selfVersion := r.self.DeprecatedVersion() + if selfVersion == nil { + return + } + r.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + r.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric has been deprecated for more than one release, hiding.") + r.isHidden = true + } + }) +} + +func (r *lazyMetric) IsHidden() bool { + return r.isHidden +} + +func (r *lazyMetric) IsDeprecated() bool { + return r.isDeprecated +} + +// Create forces the initialization of metric which has been deferred until +// the point at which this method is invoked. 
This method will determine whether +// the metric is deprecated or hidden, no-opting if the metric should be considered +// hidden. Furthermore, this function no-opts and returns true if metric is already +// created. +func (r *lazyMetric) Create(version *semver.Version) bool { + if version != nil { + r.determineDeprecationStatus(*version) + } + // let's not create if this metric is slated to be hidden + if r.IsHidden() { + return false + } + r.createOnce.Do(func() { + r.createLock.Lock() + defer r.createLock.Unlock() + r.isCreated = true + if r.IsDeprecated() { + r.self.initializeDeprecatedMetric() + } else { + r.self.initializeMetric() + } + }) + return r.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (r *lazyMetric) ClearState() { + r.createLock.Lock() + defer r.createLock.Unlock() + + r.isDeprecated = false + r.isHidden = false + r.isCreated = false + r.markDeprecationOnce = *(new(sync.Once)) + r.createOnce = *(new(sync.Once)) +} + +// FQName returns the fully-qualified metric name of the collector. +func (r *lazyMetric) FQName() string { + return r.fqName +} + +/* +This code is directly lifted from the prometheus codebase. It's a convenience struct which +allows you satisfy the Collector interface automatically if you already satisfy the Metric interface. + +For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120 +*/ +type selfCollector struct { + metric prometheus.Metric +} + +func (c *selfCollector) initSelfCollection(m prometheus.Metric) { + c.metric = m +} + +func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.metric.Desc() +} + +func (c *selfCollector) Collect(ch chan<- prometheus.Metric) { + ch <- c.metric +} + +// no-op vecs for convenience +var noopCounterVec = &prometheus.CounterVec{} +var noopHistogramVec = &prometheus.HistogramVec{} +var noopGaugeVec = &prometheus.GaugeVec{} +var noopObserverVec = &noopObserverVector{} + +// just use a convenience struct for all the no-ops +var noop = &noopMetric{} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Add(float64) {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Sub(float64) {} +func (noopMetric) Observe(float64) {} +func (noopMetric) SetToCurrentTime() {} +func (noopMetric) Desc() *prometheus.Desc { return nil } +func (noopMetric) Write(*dto.Metric) error { return nil } +func (noopMetric) Describe(chan<- *prometheus.Desc) {} +func (noopMetric) Collect(chan<- prometheus.Metric) {} + +type noopObserverVector struct{} + +func (noopObserverVector) GetMetricWith(prometheus.Labels) (prometheus.Observer, error) { + return noop, nil +} +func (noopObserverVector) GetMetricWithLabelValues(...string) (prometheus.Observer, error) { + return noop, nil +} +func (noopObserverVector) With(prometheus.Labels) prometheus.Observer { return noop } +func (noopObserverVector) WithLabelValues(...string) prometheus.Observer { return noop } +func (noopObserverVector) CurryWith(prometheus.Labels) (prometheus.ObserverVec, error) { + return noopObserverVec, nil +} +func (noopObserverVector) MustCurryWith(prometheus.Labels) prometheus.ObserverVec { + return noopObserverVec +} +func (noopObserverVector) Describe(chan<- *prometheus.Desc) {} +func (noopObserverVector) Collect(chan<- prometheus.Metric) {} diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go new file mode 100644 
index 0000000000..c15dd9b4d2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + + "github.com/blang/semver" + "github.com/spf13/pflag" + + "k8s.io/component-base/version" +) + +// Options has all parameters needed for exposing metrics from components +type Options struct { + ShowHiddenMetricsForVersion string +} + +// NewOptions returns default metrics options +func NewOptions() *Options { + return &Options{} +} + +// Validate validates metrics flags options. +func (o *Options) Validate() []error { + err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion) + if err != nil { + return []error{err} + } + + return nil +} + +// AddFlags adds flags for exposing component metrics. +func (o *Options) AddFlags(fs *pflag.FlagSet) { + if o != nil { + o = NewOptions() + } + fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion, + "The previous version for which you want to show hidden metrics. "+ + "Only the previous minor version is meaningful, other values will not be allowed. "+ + "The format is ., e.g.: '1.16'. "+ + "The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, "+ + "rather than being surprised when they are permanently removed in the release after that.") +} + +// Apply applies parameters into global configuration of metrics. +func (o *Options) Apply() { + if o != nil && len(o.ShowHiddenMetricsForVersion) > 0 { + SetShowHidden() + } +} + +func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error { + if targetVersionStr == "" { + return nil + } + + validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1) + if targetVersionStr != validVersionStr { + return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr) + } + + return nil +} diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go new file mode 100644 index 0000000000..906050c7f7 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/opts.go @@ -0,0 +1,245 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
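A sketch of the intended wiring for the Options above (component name and flag value are illustrative). Note that, as vendored, AddFlags guards with "if o != nil { o = NewOptions() }", which replaces a non-nil receiver with a fresh Options before binding the flag; an "o == nil" guard would match the apparent intent, so with the code as written the parsed value may never reach the original receiver.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"k8s.io/component-base/metrics"
)

func main() {
	o := metrics.NewOptions()

	fs := pflag.NewFlagSet("example-component", pflag.ExitOnError)
	o.AddFlags(fs) // registers --show-hidden-metrics-for-version
	_ = fs.Parse([]string{"--show-hidden-metrics-for-version=1.16"})

	if errs := o.Validate(); len(errs) > 0 {
		fmt.Println(errs)
		return
	}
	o.Apply() // enables showing hidden metrics if a version was supplied
}
```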
+*/ + +package metrics + +import ( + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure +// is purposefully not embedded here because that would change struct initialization +// in the manner which people are currently accustomed. +// +// Name must be set to a non-empty string. DeprecatedVersion is defined only +// if the metric for which this options applies is, in fact, deprecated. +type KubeOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + return prometheus.BuildFQName(namespace, subsystem, name) +} + +// StabilityLevel represents the API guarantees for a given defined metric. +type StabilityLevel string + +const ( + // ALPHA metrics have no stability guarantees, as such, labels may + // be arbitrarily added/removed and the metric may be deleted at any time. + ALPHA StabilityLevel = "ALPHA" + // STABLE metrics are guaranteed not be mutated and removal is governed by + // the deprecation policy outlined in by the control plane metrics stability KEP. + STABLE StabilityLevel = "STABLE" +) + +// setDefaults takes 'ALPHA' in case of empty. +func (sl *StabilityLevel) setDefaults() { + switch *sl { + case "": + *sl = ALPHA + default: + // no-op, since we have a StabilityLevel already + } +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts KubeOpts + +// Modify help description on the metric description. +func (o *CounterOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *CounterOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts KubeOpts + +// Modify help description on the metric description. 
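The markDeprecated/annotateStabilityLevel helpers above rewrite Help once, at create time; a sketch of the annotated HELP text as gathered from a registry (metric name is hypothetical, and the KubeRegistry constructor used here is defined later in registry.go; a DeprecatedVersion would additionally add a "(Deprecated since ...)" prefix):

```go
package main

import (
	"fmt"

	"k8s.io/component-base/metrics"
)

func main() {
	c := metrics.NewCounter(&metrics.CounterOpts{
		Name:           "example_ops_total",
		Help:           "Counts example operations.",
		StabilityLevel: metrics.ALPHA,
	})

	r := metrics.NewKubeRegistry()
	if err := r.Register(c); err != nil {
		fmt.Println(err)
		return
	}

	// Expected HELP after annotation: "[ALPHA] Counts example operations."
	families, _ := r.Gather()
	for _, f := range families {
		fmt.Println(f.GetName(), f.GetHelp())
	}
}
```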
+func (o *GaugeOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *GaugeOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// Modify help description on the metric description. +func (o *HistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *HistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + } +} + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// Modify help description on the metric description. +func (o *SummaryOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *SummaryOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// Deprecated: DefObjectives will not be used as the default objectives in +// v1.0.0 of the library. 
The default Summary will have no quantiles then. +var ( + defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + // we need to retain existing quantile behavior for backwards compatibility, + // so let's do what prometheus used to do prior to v1. + objectives := o.Objectives + if objectives == nil { + objectives = defObjectives + } + return prometheus.SummaryOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Objectives: objectives, + MaxAge: o.MaxAge, + AgeBuckets: o.AgeBuckets, + BufCap: o.BufCap, + } +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go new file mode 100644 index 0000000000..8dde458814 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "os" + "time" + + "github.com/prometheus/procfs" + + "k8s.io/klog/v2" +) + +var processStartTime = NewGaugeVec( + &GaugeOpts{ + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + StabilityLevel: ALPHA, + }, + []string{}, +) + +// RegisterProcessStartTime registers the process_start_time_seconds to +// a prometheus registry. This metric needs to be included to ensure counter +// data fidelity. +func RegisterProcessStartTime(registrationFunc func(Registerable) error) error { + start, err := getProcessStart() + if err != nil { + klog.Errorf("Could not get process start time, %v", err) + start = float64(time.Now().Unix()) + } + // processStartTime is a lazy metric which only get initialized after registered. + // so we have to explicitly create it before setting the label value. Otherwise + // it is a noop. + if !processStartTime.IsCreated() { + processStartTime.initializeMetric() + } + processStartTime.WithLabelValues().Set(start) + return registrationFunc(processStartTime) +} + +func getProcessStart() (float64, error) { + pid := os.Getpid() + p, err := procfs.NewProc(pid) + if err != nil { + return 0, err + } + + if stat, err := p.Stat(); err == nil { + return stat.StartTime() + } + return 0, err +} diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go new file mode 100644 index 0000000000..06a5c6503c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -0,0 +1,329 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
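A sketch of wiring RegisterProcessStartTime above to the global registry's Register function; the pairing with legacyregistry is an assumption, since any func(Registerable) error can be passed:

```go
package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

func main() {
	// Registers process_start_time_seconds; falls back to time.Now() if the
	// start time cannot be read from procfs.
	if err := metrics.RegisterProcessStartTime(legacyregistry.Register); err != nil {
		panic(err)
	}
}
```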
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + apimachineryversion "k8s.io/apimachinery/pkg/version" + "k8s.io/component-base/version" +) + +var ( + showHiddenOnce sync.Once + showHidden atomic.Value + registries []*kubeRegistry // stores all registries created by NewKubeRegistry() + registriesLock sync.RWMutex +) + +// shouldHide be used to check if a specific metric with deprecated version should be hidden +// according to metrics deprecation lifecycle. +func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool { + guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor)) + if err != nil { + panic("failed to make version from current version") + } + + if deprecatedVersion.LT(guardVersion) { + return true + } + + return false +} + +// ValidateShowHiddenMetricsVersion checks invalid version for which show hidden metrics. +func ValidateShowHiddenMetricsVersion(v string) []error { + err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v) + if err != nil { + return []error{err} + } + + return nil +} + +// SetShowHidden will enable showing hidden metrics. This will no-opt +// after the initial call +func SetShowHidden() { + showHiddenOnce.Do(func() { + showHidden.Store(true) + + // re-register collectors that has been hidden in phase of last registry. + for _, r := range registries { + r.enableHiddenCollectors() + r.enableHiddenStableCollectors() + } + }) +} + +// ShouldShowHidden returns whether showing hidden deprecated metrics +// is enabled. While the primary usecase for this is internal (to determine +// registration behavior) this can also be used to introspect +func ShouldShowHidden() bool { + return showHidden.Load() != nil && showHidden.Load().(bool) +} + +// Registerable is an interface for a collector metric which we +// will register with KubeRegistry. +type Registerable interface { + prometheus.Collector + + // Create will mark deprecated state for the collector + Create(version *semver.Version) bool + + // ClearState will clear all the states marked by Create. + ClearState() + + // FQName returns the fully-qualified metric name of the collector. 
+ FQName() string +} + +type resettable interface { + Reset() +} + +// KubeRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type KubeRegistry interface { + // Deprecated + RawMustRegister(...prometheus.Collector) + // CustomRegister is our internal variant of Prometheus registry.Register + CustomRegister(c StableCollector) error + // CustomMustRegister is our internal variant of Prometheus registry.MustRegister + CustomMustRegister(cs ...StableCollector) + // Register conforms to Prometheus registry.Register + Register(Registerable) error + // MustRegister conforms to Prometheus registry.MustRegister + MustRegister(...Registerable) + // Unregister conforms to Prometheus registry.Unregister + Unregister(collector Collector) bool + // Gather conforms to Prometheus gatherer.Gather + Gather() ([]*dto.MetricFamily, error) + // Reset invokes the Reset() function on all items in the registry + // which are added as resettables. + Reset() +} + +// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization +// the kubernetes binary version information is loaded into the registry object, so that +// automatic behavior can be configured for metric versioning. +type kubeRegistry struct { + PromRegistry + version semver.Version + hiddenCollectors map[string]Registerable // stores all collectors that has been hidden + stableCollectors []StableCollector // stores all stable collector + hiddenCollectorsLock sync.RWMutex + stableCollectorsLock sync.RWMutex + resetLock sync.RWMutex + resettables []resettable +} + +// Register registers a new Collector to be included in metrics +// collection. It returns an error if the descriptors provided by the +// Collector are invalid or if they — in combination with descriptors of +// already registered Collectors — do not fulfill the consistency and +// uniqueness criteria described in the documentation of metric.Desc. +func (kr *kubeRegistry) Register(c Registerable) error { + if c.Create(&kr.version) { + defer kr.addResettable(c) + return kr.PromRegistry.Register(c) + } + + kr.trackHiddenCollector(c) + return nil +} + +// MustRegister works like Register but registers any number of +// Collectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) MustRegister(cs ...Registerable) { + metrics := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version) { + metrics = append(metrics, c) + kr.addResettable(c) + } else { + kr.trackHiddenCollector(c) + } + } + kr.PromRegistry.MustRegister(metrics...) +} + +// CustomRegister registers a new custom collector. +func (kr *kubeRegistry) CustomRegister(c StableCollector) error { + kr.trackStableCollectors(c) + defer kr.addResettable(c) + if c.Create(&kr.version, c) { + return kr.PromRegistry.Register(c) + } + return nil +} + +// CustomMustRegister works like CustomRegister but registers any number of +// StableCollectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) { + kr.trackStableCollectors(cs...) + collectors := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version, c) { + kr.addResettable(c) + collectors = append(collectors, c) + } + } + kr.PromRegistry.MustRegister(collectors...) +} + +// RawMustRegister takes a native prometheus.Collector and registers the collector +// to the registry. 
This bypasses metrics safety checks, so should only be used +// to register custom prometheus collectors. +// +// Deprecated +func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) { + kr.PromRegistry.MustRegister(cs...) + for _, c := range cs { + kr.addResettable(c) + } +} + +// addResettable will automatically add our metric to our reset +// list if it satisfies the interface +func (kr *kubeRegistry) addResettable(i interface{}) { + kr.resetLock.Lock() + defer kr.resetLock.Unlock() + if resettable, ok := i.(resettable); ok { + kr.resettables = append(kr.resettables, resettable) + } +} + +// Unregister unregisters the Collector that equals the Collector passed +// in as an argument. (Two Collectors are considered equal if their +// Describe method yields the same set of descriptors.) The function +// returns whether a Collector was unregistered. Note that an unchecked +// Collector cannot be unregistered (as its Describe method does not +// yield any descriptor). +func (kr *kubeRegistry) Unregister(collector Collector) bool { + return kr.PromRegistry.Unregister(collector) +} + +// Gather calls the Collect method of the registered Collectors and then +// gathers the collected metrics into a lexicographically sorted slice +// of uniquely named MetricFamily protobufs. Gather ensures that the +// returned slice is valid and self-consistent so that it can be used +// for valid exposition. As an exception to the strict consistency +// requirements described for metric.Desc, Gather will tolerate +// different sets of label names for metrics of the same metric family. +func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) { + return kr.PromRegistry.Gather() +} + +// trackHiddenCollector stores all hidden collectors. +func (kr *kubeRegistry) trackHiddenCollector(c Registerable) { + kr.hiddenCollectorsLock.Lock() + defer kr.hiddenCollectorsLock.Unlock() + + kr.hiddenCollectors[c.FQName()] = c +} + +// trackStableCollectors stores all custom collectors. +func (kr *kubeRegistry) trackStableCollectors(cs ...StableCollector) { + kr.stableCollectorsLock.Lock() + defer kr.stableCollectorsLock.Unlock() + + kr.stableCollectors = append(kr.stableCollectors, cs...) +} + +// enableHiddenCollectors will re-register all of the hidden collectors. +func (kr *kubeRegistry) enableHiddenCollectors() { + if len(kr.hiddenCollectors) == 0 { + return + } + + kr.hiddenCollectorsLock.Lock() + cs := make([]Registerable, 0, len(kr.hiddenCollectors)) + + for _, c := range kr.hiddenCollectors { + c.ClearState() + cs = append(cs, c) + } + + kr.hiddenCollectors = nil + kr.hiddenCollectorsLock.Unlock() + kr.MustRegister(cs...) +} + +// enableHiddenStableCollectors will re-register the stable collectors if there is one or more hidden metrics in it. +// Since we can not register a metrics twice, so we have to unregister first then register again. +func (kr *kubeRegistry) enableHiddenStableCollectors() { + if len(kr.stableCollectors) == 0 { + return + } + + kr.stableCollectorsLock.Lock() + + cs := make([]StableCollector, 0, len(kr.stableCollectors)) + for _, c := range kr.stableCollectors { + if len(c.HiddenMetrics()) > 0 { + kr.Unregister(c) // unregister must happens before clear state, otherwise no metrics would be unregister + c.ClearState() + cs = append(cs, c) + } + } + + kr.stableCollectors = nil + kr.stableCollectorsLock.Unlock() + kr.CustomMustRegister(cs...) +} + +// Reset invokes Reset on all metrics that are resettable. 
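+//
+// Rough usage sketch (assumes this package's NewCounterVec/CounterOpts helpers,
+// defined outside this file; the metric name is hypothetical):
+//
+//	r := NewKubeRegistry()
+//	evictions := NewCounterVec(&CounterOpts{Name: "evictions_total", Help: "Evictions."}, []string{"result"})
+//	r.MustRegister(evictions) // CounterVec has a Reset method, so it is tracked as resettable
+//	evictions.WithLabelValues("success").Inc()
+//	r.Reset() // evictions_total drops back to zero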
+func (kr *kubeRegistry) Reset() { + kr.resetLock.RLock() + defer kr.resetLock.RUnlock() + for _, r := range kr.resettables { + r.Reset() + } +} + +// BuildVersion is a helper function that can be easily mocked. +var BuildVersion = version.Get + +func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry { + r := &kubeRegistry{ + PromRegistry: prometheus.NewRegistry(), + version: parseVersion(v), + hiddenCollectors: make(map[string]Registerable), + resettables: make([]resettable, 0), + } + + registriesLock.Lock() + defer registriesLock.Unlock() + registries = append(registries, r) + + return r +} + +// NewKubeRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewKubeRegistry() KubeRegistry { + r := newKubeRegistry(BuildVersion()) + return r +} diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go new file mode 100644 index 0000000000..726bcc4894 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/summary.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// Summary is our internal representation for our wrapping struct around prometheus +// summaries. Summary implements both kubeCollector and ObserverMetric +// +// DEPRECATED: as per the metrics overhaul KEP +type Summary struct { + ObserverMetric + *SummaryOpts + lazyMetric + selfCollector +} + +// NewSummary returns an object which is Summary-like. However, nothing +// will be measured until the summary is registered somewhere. +// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummary(opts *SummaryOpts) *Summary { + opts.StabilityLevel.setDefaults() + + s := &Summary{ + SummaryOpts: opts, + lazyMetric: lazyMetric{}, + } + s.setPrometheusSummary(noopMetric{}) + s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return s +} + +// setPrometheusSummary sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (s *Summary) setPrometheusSummary(summary prometheus.Summary) { + s.ObserverMetric = summary + s.initSelfCollection(summary) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (s *Summary) DeprecatedVersion() *semver.Version { + return parseSemver(s.SummaryOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Summary object instantiation +// and stores a reference to it +func (s *Summary) initializeMetric() { + s.SummaryOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts())) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation +// but modifies the Help description prior to object instantiation. 
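+//
+// Rough sketch of the lazy-instantiation path described on NewSummary above
+// (hypothetical metric name; legacyregistry is the sibling
+// k8s.io/component-base/metrics/legacyregistry package):
+//
+//	s := NewSummary(&SummaryOpts{Name: "request_duration_seconds", Help: "Duration of requests."})
+//	s.Observe(0.42)                // no-op: the summary is still backed by noopMetric
+//	legacyregistry.MustRegister(s) // Create() runs and swaps in a real prometheus.Summary
+//	s.Observe(0.42)                // recorded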
+func (s *Summary) initializeDeprecatedMetric() { + s.SummaryOpts.markDeprecated() + s.initializeMetric() +} + +// SummaryVec is the internal representation of our wrapping struct around prometheus +// summaryVecs. +// +// DEPRECATED: as per the metrics overhaul KEP +type SummaryVec struct { + *prometheus.SummaryVec + *SummaryOpts + lazyMetric + originalLabels []string +} + +// NewSummaryVec returns an object which satisfies kubeCollector and wraps the +// prometheus.SummaryVec object. However, the object returned will not measure +// anything unless the collector is first registered, since the metric is lazily instantiated. +// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { + opts.StabilityLevel.setDefaults() + + v := &SummaryVec{ + SummaryOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + v.lazyInit(v, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *SummaryVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.SummaryOpts.DeprecatedVersion) +} + +func (v *SummaryVec) initializeMetric() { + v.SummaryOpts.annotateStabilityLevel() + v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels) +} + +func (v *SummaryVec) initializeDeprecatedMetric() { + v.SummaryOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/summary.go#L485-L495 + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec +// has been registered to a metrics registry. +func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.SummaryVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has +// been registered to a metrics registry. +func (v *SummaryVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.SummaryVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. 
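+//
+// Illustrative only (hypothetical metric name and label):
+//
+//	v := NewSummaryVec(&SummaryOpts{Name: "request_latency_seconds", Help: "Latency."}, []string{"verb"})
+//	v.Delete(map[string]string{"verb": "GET"}) // returns false: the vector has not been registered yet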
+func (v *SummaryVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.SummaryVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *SummaryVec) Reset() { + if !v.IsCreated() { + return + } + + v.SummaryVec.Reset() +} diff --git a/vendor/k8s.io/component-base/metrics/value.go b/vendor/k8s.io/component-base/metrics/value.go new file mode 100644 index 0000000000..4a19aaa3bf --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/value.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +func (vt *ValueType) toPromValueType() prometheus.ValueType { + return prometheus.ValueType(*vt) +} + +// NewLazyConstMetric is a helper of MustNewConstMetric. +// +// Note: If the metrics described by the desc is hidden, the metrics will not be created. +func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + if desc.IsHidden() { + return nil + } + return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...) +} + +// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp. +// +// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(), +// otherwise, no stability guarantees would be offered. +func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric { + if m == nil { + return nil + } + + return prometheus.NewMetricWithTimestamp(t, m) +} diff --git a/vendor/k8s.io/component-base/metrics/version.go b/vendor/k8s.io/component-base/metrics/version.go new file mode 100644 index 0000000000..f963e205eb --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import "k8s.io/component-base/version" + +var ( + buildInfo = NewGaugeVec( + &GaugeOpts{ + Name: "kubernetes_build_info", + Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.", + StabilityLevel: ALPHA, + }, + []string{"major", "minor", "git_version", "git_commit", "git_tree_state", "build_date", "go_version", "compiler", "platform"}, + ) +) + +// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus +func RegisterBuildInfo(r KubeRegistry) { + info := version.Get() + r.MustRegister(buildInfo) + buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1) +} diff --git a/vendor/k8s.io/component-base/metrics/version_parser.go b/vendor/k8s.io/component-base/metrics/version_parser.go new file mode 100644 index 0000000000..d52bd74bdf --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version_parser.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +const ( + versionRegexpString = `^v(\d+\.\d+\.\d+)` +) + +var ( + versionRe = regexp.MustCompile(versionRegexpString) +) + +func parseSemver(s string) *semver.Version { + if s != "" { + sv := semver.MustParse(s) + return &sv + } + return nil +} +func parseVersion(ver apimachineryversion.Info) semver.Version { + matches := versionRe.FindAllStringSubmatch(ver.String(), -1) + + if len(matches) != 1 { + panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String())) + } + return semver.MustParse(matches[0][1]) +} diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go new file mode 100644 index 0000000000..6ae8a458ac --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/wrappers.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// This file contains a series of interfaces which we explicitly define for +// integrating with prometheus. 
We redefine the interfaces explicitly here +// so that we can prevent breakage if methods are ever added to prometheus +// variants of them. + +// Collector defines a subset of prometheus.Collector interface methods +type Collector interface { + Describe(chan<- *prometheus.Desc) + Collect(chan<- prometheus.Metric) +} + +// Metric defines a subset of prometheus.Metric interface methods +type Metric interface { + Desc() *prometheus.Desc + Write(*dto.Metric) error +} + +// CounterMetric is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. + +// CounterMetric is an interface which defines a subset of the interface provided by prometheus.Counter +type CounterMetric interface { + Inc() + Add(float64) +} + +// CounterVecMetric is an interface which prometheus.CounterVec satisfies. +type CounterVecMetric interface { + WithLabelValues(...string) CounterMetric + With(prometheus.Labels) CounterMetric +} + +// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge +type GaugeMetric interface { + Set(float64) + Inc() + Dec() + Add(float64) + Write(out *dto.Metric) error + SetToCurrentTime() +} + +// ObserverMetric captures individual observations. +type ObserverMetric interface { + Observe(float64) +} + +// PromRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type PromRegistry interface { + Register(prometheus.Collector) error + MustRegister(...prometheus.Collector) + Unregister(prometheus.Collector) bool + Gather() ([]*dto.MetricFamily, error) +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. +type Gatherer interface { + prometheus.Gatherer +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} diff --git a/vendor/k8s.io/component-base/version/.gitattributes b/vendor/k8s.io/component-base/version/.gitattributes new file mode 100644 index 0000000000..7e349eff60 --- /dev/null +++ b/vendor/k8s.io/component-base/version/.gitattributes @@ -0,0 +1 @@ +base.go export-subst diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go new file mode 100644 index 0000000000..e13678c305 --- /dev/null +++ b/vendor/k8s.io/component-base/version/base.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. +// +// This is the fallback data used when version information from git is not +// provided via go ldflags. It provides an approximation of the Kubernetes +// version for ad-hoc builds (e.g. 
`go build`) that cannot get the version +// information from git. +// +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. +// +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// component-base/version/base.go +var ( + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) + gitMajor string // major version, always numeric + gitMinor string // minor version, numeric possibly followed by "+" + + // semantic version, derived by build scripts (see + // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons. For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. See also https://git-scm.com/docs/gitattributes + gitVersion = "v0.0.0-master+$Format:%h$" + gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState = "" // state of git tree, either "clean" or "dirty" + + buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/k8s.io/component-base/version/def.bzl b/vendor/k8s.io/component-base/version/def.bzl new file mode 100644 index 0000000000..77edcbc89b --- /dev/null +++ b/vendor/k8s.io/component-base/version/def.bzl @@ -0,0 +1,39 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel. +def version_x_defs(): + # This should match the list of packages in kube::version::ldflag + stamp_pkgs = [ + "k8s.io/kubernetes/vendor/k8s.io/component-base/version", + "k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version", + ] + + # This should match the list of vars in kube::version::ldflags + # It should also match the list of vars set in hack/print-workspace-status.sh. + stamp_vars = [ + "buildDate", + "gitCommit", + "gitMajor", + "gitMinor", + "gitTreeState", + "gitVersion", + ] + + # Generate the cross-product. 
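+    # For example (illustrative), one generated entry is:
+    #   "k8s.io/kubernetes/vendor/k8s.io/component-base/version.gitVersion": "{gitVersion}"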
+ x_defs = {} + for pkg in stamp_pkgs: + for var in stamp_vars: + x_defs["%s.%s" % (pkg, var)] = "{%s}" % var + return x_defs diff --git a/vendor/k8s.io/component-base/version/version.go b/vendor/k8s.io/component-base/version/version.go new file mode 100644 index 0000000000..d1e76dc00e --- /dev/null +++ b/vendor/k8s.io/component-base/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in ./base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 61f0059905..fc244d3f58 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,6 +17,12 @@ github.com/Azure/go-autorest/tracing github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 github.com/PuerkitoBio/urlesc +# github.com/beorn7/perks v1.0.1 +github.com/beorn7/perks/quantile +# github.com/blang/semver v3.5.1+incompatible +github.com/blang/semver +# github.com/cespare/xxhash/v2 v2.1.1 +github.com/cespare/xxhash/v2 # github.com/client9/misspell v0.3.4 ## explicit github.com/client9/misspell @@ -77,12 +83,28 @@ github.com/json-iterator/go github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 +github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 # github.com/pkg/errors v0.9.1 github.com/pkg/errors +# github.com/prometheus/client_golang v1.7.1 +github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/internal +github.com/prometheus/client_golang/prometheus/promhttp +# github.com/prometheus/client_model v0.2.0 +github.com/prometheus/client_model/go +# github.com/prometheus/common v0.10.0 +github.com/prometheus/common/expfmt +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model +# github.com/prometheus/procfs v0.2.0 +github.com/prometheus/procfs +github.com/prometheus/procfs/internal/fs +github.com/prometheus/procfs/internal/util # github.com/spf13/cobra v0.0.5 ## explicit github.com/spf13/cobra @@ -552,6 +574,9 @@ k8s.io/component-base/logs k8s.io/component-base/logs/datapol 
k8s.io/component-base/logs/json k8s.io/component-base/logs/sanitization +k8s.io/component-base/metrics +k8s.io/component-base/metrics/legacyregistry +k8s.io/component-base/version # k8s.io/component-helpers v0.20.0 ## explicit k8s.io/component-helpers/scheduling/corev1 From 3bd031bbb3b067efdadf7b3f5595c9129205f37c Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Tue, 23 Feb 2021 00:27:58 +0100 Subject: [PATCH 2/5] Move build's versioning bits under pkg/version --- Makefile | 2 +- cmd/descheduler/app/version.go | 62 +------------------------- pkg/version/version.go | 80 ++++++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 61 deletions(-) create mode 100644 pkg/version/version.go diff --git a/Makefile b/Makefile index b05a62e9ea..e5b3e626db 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ # VERSION is based on a date stamp plus the last commit VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*") BUILD=$(shell date +%FT%T%z) -LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app +LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version ARCHS = amd64 arm64 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD}" diff --git a/cmd/descheduler/app/version.go b/cmd/descheduler/app/version.go index a59f923364..43bb283d25 100644 --- a/cmd/descheduler/app/version.go +++ b/cmd/descheduler/app/version.go @@ -18,77 +18,19 @@ package app import ( "fmt" - "regexp" - "runtime" - "strings" "github.com/spf13/cobra" + "sigs.k8s.io/descheduler/pkg/version" ) -var ( - // version is a constant representing the version tag that - // generated this build. It should be set during build via -ldflags. - version string - // buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') - //It should be set during build via -ldflags. - buildDate string -) - -// Info holds the information related to descheduler app version. -type Info struct { - Major string `json:"major"` - Minor string `json:"minor"` - GitVersion string `json:"gitVersion"` - BuildDate string `json:"buildDate"` - GoVersion string `json:"goVersion"` - Compiler string `json:"compiler"` - Platform string `json:"platform"` -} - -// Get returns the overall codebase version. It's for detecting -// what code a binary was built from. -func Get() Info { - majorVersion, minorVersion := splitVersion(version) - return Info{ - Major: majorVersion, - Minor: minorVersion, - GitVersion: version, - BuildDate: buildDate, - GoVersion: runtime.Version(), - Compiler: runtime.Compiler, - Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), - } -} - func NewVersionCommand() *cobra.Command { var versionCmd = &cobra.Command{ Use: "version", Short: "Version of descheduler", Long: `Prints the version of descheduler.`, Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Descheduler version %+v\n", Get()) + fmt.Printf("Descheduler version %+v\n", version.Get()) }, } return versionCmd } - -// splitVersion splits the git version to generate major and minor versions needed. -func splitVersion(version string) (string, string) { - if version == "" { - return "", "" - } - - // Version from an automated container build environment for a tag. For example v20200521-v0.18.0. - m1, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+$`, version) - - // Version from an automated container build environment(not a tag) or a local build. For example v20201009-v0.18.0-46-g939c1c0. 
- m2, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+-\w+-\w+$`, version) - - if m1 || m2 { - semVer := strings.Split(version, "-")[1] - return strings.Trim(strings.Split(semVer, ".")[0], "v"), strings.Split(semVer, ".")[1] + "+" - } - - // Something went wrong - return "", "" -} diff --git a/pkg/version/version.go b/pkg/version/version.go new file mode 100644 index 0000000000..ab02915030 --- /dev/null +++ b/pkg/version/version.go @@ -0,0 +1,80 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "regexp" + "runtime" + "strings" +) + +var ( + // version is a constant representing the version tag that + // generated this build. It should be set during build via -ldflags. + version string + // buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + //It should be set during build via -ldflags. + buildDate string +) + +// Info holds the information related to descheduler app version. +type Info struct { + Major string `json:"major"` + Minor string `json:"minor"` + GitVersion string `json:"gitVersion"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() Info { + majorVersion, minorVersion := splitVersion(version) + return Info{ + Major: majorVersion, + Minor: minorVersion, + GitVersion: version, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} + +// splitVersion splits the git version to generate major and minor versions needed. +func splitVersion(version string) (string, string) { + if version == "" { + return "", "" + } + + // Version from an automated container build environment for a tag. For example v20200521-v0.18.0. + m1, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+$`, version) + + // Version from an automated container build environment(not a tag) or a local build. For example v20201009-v0.18.0-46-g939c1c0. 
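+	// For that example, splitVersion returns major "0" and minor "18+".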
+ m2, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+-\w+-\w+$`, version) + + if m1 || m2 { + semVer := strings.Split(version, "-")[1] + return strings.Trim(strings.Split(semVer, ".")[0], "v"), strings.Split(semVer, ".")[1] + "+" + } + + // Something went wrong + return "", "" +} From 1c5b32763b5d6ac154f0765eedf4f5460e62ac67 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Wed, 3 Mar 2021 11:14:37 +0100 Subject: [PATCH 3/5] Register metrics New metrics: - build_info: Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch - pods_evicted: Number of successfully evicted pods, by the result, by the strategy, by the namespace --- Makefile | 4 +- metrics/metrics.go | 72 ++++++++++++++++++++++++++++++++++ pkg/descheduler/descheduler.go | 3 ++ pkg/version/version.go | 12 +++++- 4 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 metrics/metrics.go diff --git a/Makefile b/Makefile index e5b3e626db..c580d1a461 100644 --- a/Makefile +++ b/Makefile @@ -16,11 +16,13 @@ # VERSION is based on a date stamp plus the last commit VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*") +BRANCH?=$(shell git branch --show-current) +SHA1?=$(shell git rev-parse HEAD) BUILD=$(shell date +%FT%T%z) LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version ARCHS = amd64 arm64 -LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD}" +LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}" GOLANGCI_VERSION := v1.30.0 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint) diff --git a/metrics/metrics.go b/metrics/metrics.go new file mode 100644 index 0000000000..5750d769a3 --- /dev/null +++ b/metrics/metrics.go @@ -0,0 +1,72 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "sigs.k8s.io/descheduler/pkg/version" +) + +const ( + // DeschedulerSubsystem - subsystem name used by descheduler + DeschedulerSubsystem = "descheduler" +) + +var ( + PodsEvicted = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: DeschedulerSubsystem, + Name: "pods_evicted", + Help: "Number of evicted pods, by the result, by the strategy, by the namespace. 
'failed' result means a pod could not be evicted", + StabilityLevel: metrics.ALPHA, + }, []string{"result", "strategy", "namespace"}) + + buildInfo = metrics.NewGauge( + &metrics.GaugeOpts{ + Subsystem: DeschedulerSubsystem, + Name: "build_info", + Help: "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch", + ConstLabels: map[string]string{"GoVersion": version.Get().GoVersion, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1}, + StabilityLevel: metrics.ALPHA, + }, + ) + + metricsList = []metrics.Registerable{ + PodsEvicted, + buildInfo, + } +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + // Register the metrics. + registerMetrics.Do(func() { + RegisterMetrics(metricsList...) + }) +} + +// RegisterMetrics registers a list of metrics. +func RegisterMetrics(extraMetrics ...metrics.Registerable) { + for _, metric := range extraMetrics { + legacyregistry.MustRegister(metric) + } +} diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go index 3c6f84d93c..36a6482b52 100644 --- a/pkg/descheduler/descheduler.go +++ b/pkg/descheduler/descheduler.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "sigs.k8s.io/descheduler/cmd/descheduler/app/options" + "sigs.k8s.io/descheduler/metrics" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/client" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" @@ -36,6 +37,8 @@ import ( ) func Run(rs *options.DeschedulerServer) error { + metrics.Register() + ctx := context.Background() rsclient, err := client.CreateClient(rs.KubeconfigFile) if err != nil { diff --git a/pkg/version/version.go b/pkg/version/version.go index ab02915030..0325d775b7 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -28,8 +28,14 @@ var ( // generated this build. It should be set during build via -ldflags. version string // buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') - //It should be set during build via -ldflags. + // It should be set during build via -ldflags. buildDate string + // gitbranch is a constant representing git branch for this build. + // It should be set during build via -ldflags. + gitbranch string + // gitbranch is a constant representing git sha1 for this build. + // It should be set during build via -ldflags. + gitsha1 string ) // Info holds the information related to descheduler app version. 
@@ -37,6 +43,8 @@ type Info struct { Major string `json:"major"` Minor string `json:"minor"` GitVersion string `json:"gitVersion"` + GitBranch string `json:"gitBranch"` + GitSha1 string `json:"gitSha1"` BuildDate string `json:"buildDate"` GoVersion string `json:"goVersion"` Compiler string `json:"compiler"` @@ -51,6 +59,8 @@ func Get() Info { Major: majorVersion, Minor: minorVersion, GitVersion: version, + GitBranch: gitbranch, + GitSha1: gitsha1, BuildDate: buildDate, GoVersion: runtime.Version(), Compiler: runtime.Compiler, From 24458fb0ca989a16a9730e8a64ecf1a9c9daab89 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Tue, 23 Feb 2021 00:28:38 +0100 Subject: [PATCH 4/5] Increase pods_evicted metric --- pkg/descheduler/evictions/evictions.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/descheduler/evictions/evictions.go b/pkg/descheduler/evictions/evictions.go index 124b5a9098..ae7c7a55a8 100644 --- a/pkg/descheduler/evictions/evictions.go +++ b/pkg/descheduler/evictions/evictions.go @@ -31,6 +31,7 @@ import ( clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "sigs.k8s.io/descheduler/metrics" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" "sigs.k8s.io/descheduler/pkg/utils" @@ -97,12 +98,13 @@ func (pe *PodEvictor) TotalEvicted() int { // EvictPod returns non-nil error only when evicting a pod on a node is not // possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod // is evicted on the server side. -func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) { - var reason string +func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, strategy string, reasons ...string) (bool, error) { + reason := strategy if len(reasons) > 0 { - reason = " (" + strings.Join(reasons, ", ") + ")" + reason += " (" + strings.Join(reasons, ", ") + ")" } if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode { + metrics.PodsEvicted.With(map[string]string{"result": "maximum number reached", "strategy": strategy, "namespace": pod.Namespace}).Inc() return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name) } @@ -110,6 +112,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, if err != nil { // err is used only for logging purposes klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason) + metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace}).Inc() return false, nil } @@ -123,6 +126,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)}) r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"}) r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason)) + metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace}).Inc() } return true, nil } From 701f22404bea5a60ddbecf05534a589b36ff2a97 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Mon, 1 Mar 2021 12:09:05 +0100 Subject: [PATCH 5/5] Serve secure metrics at 10258 --- README.md | 17 +- cmd/descheduler/app/options/options.go | 23 +- cmd/descheduler/app/server.go 
| 26 + go.sum | 28 + .../github.com/NYTimes/gziphandler/.gitignore | 1 + .../NYTimes/gziphandler/.travis.yml | 6 + .../NYTimes/gziphandler/CODE_OF_CONDUCT.md | 75 + .../NYTimes/gziphandler/CONTRIBUTING.md | 30 + .../github.com/NYTimes/gziphandler/LICENSE.md | 13 + .../github.com/NYTimes/gziphandler/README.md | 52 + vendor/github.com/NYTimes/gziphandler/gzip.go | 332 + .../NYTimes/gziphandler/gzip_go18.go | 43 + vendor/github.com/coreos/go-semver/LICENSE | 202 + vendor/github.com/coreos/go-semver/NOTICE | 5 + .../coreos/go-semver/semver/semver.go | 296 + .../coreos/go-semver/semver/sort.go | 38 + vendor/github.com/coreos/go-systemd/LICENSE | 191 + vendor/github.com/coreos/go-systemd/NOTICE | 5 + .../coreos/go-systemd/daemon/sdnotify.go | 84 + .../coreos/go-systemd/daemon/watchdog.go | 73 + .../coreos/go-systemd/journal/journal.go | 225 + vendor/github.com/coreos/pkg/LICENSE | 202 + vendor/github.com/coreos/pkg/NOTICE | 5 + .../github.com/coreos/pkg/capnslog/README.md | 39 + .../coreos/pkg/capnslog/formatters.go | 157 + .../coreos/pkg/capnslog/glog_formatter.go | 96 + vendor/github.com/coreos/pkg/capnslog/init.go | 49 + .../coreos/pkg/capnslog/init_windows.go | 25 + .../coreos/pkg/capnslog/journald_formatter.go | 68 + .../coreos/pkg/capnslog/log_hijack.go | 39 + .../github.com/coreos/pkg/capnslog/logmap.go | 245 + .../coreos/pkg/capnslog/pkg_logger.go | 191 + .../coreos/pkg/capnslog/syslog_formatter.go | 65 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 +++ .../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/go.mod | 1 + vendor/github.com/google/uuid/hash.go | 53 + vendor/github.com/google/uuid/marshal.go | 38 + vendor/github.com/google/uuid/node.go | 90 + vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/sql.go | 59 + vendor/github.com/google/uuid/time.go | 123 + vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 245 + vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 43 + .../go-grpc-prometheus/.gitignore | 201 + .../go-grpc-prometheus/.travis.yml | 25 + .../go-grpc-prometheus/CHANGELOG.md | 24 + .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 + .../go-grpc-prometheus/README.md | 247 + .../go-grpc-prometheus/client.go | 39 + .../go-grpc-prometheus/client_metrics.go | 170 + .../go-grpc-prometheus/client_reporter.go | 46 + .../go-grpc-prometheus/makefile | 16 + .../go-grpc-prometheus/metric_options.go | 41 + .../go-grpc-prometheus/server.go | 48 + .../go-grpc-prometheus/server_metrics.go | 185 + .../go-grpc-prometheus/server_reporter.go | 46 + .../grpc-ecosystem/go-grpc-prometheus/util.go | 50 + vendor/github.com/munnerz/goautoneg/LICENSE | 31 + 
vendor/github.com/munnerz/goautoneg/Makefile | 13 + .../github.com/munnerz/goautoneg/README.txt | 67 + .../github.com/munnerz/goautoneg/autoneg.go | 189 + .../client_golang/prometheus/testutil/lint.go | 46 + .../prometheus/testutil/promlint/promlint.go | 386 + .../prometheus/testutil/testutil.go | 230 + vendor/go.etcd.io/etcd/LICENSE | 202 + vendor/go.etcd.io/etcd/NOTICE | 5 + vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go | 977 + vendor/go.etcd.io/etcd/auth/authpb/auth.proto | 42 + vendor/go.etcd.io/etcd/clientv3/README.md | 85 + vendor/go.etcd.io/etcd/clientv3/auth.go | 242 + .../etcd/clientv3/balancer/balancer.go | 293 + .../balancer/connectivity/connectivity.go | 93 + .../etcd/clientv3/balancer/picker/doc.go | 16 + .../etcd/clientv3/balancer/picker/err.go | 39 + .../etcd/clientv3/balancer/picker/picker.go | 91 + .../balancer/picker/roundrobin_balanced.go | 95 + .../balancer/resolver/endpoint/endpoint.go | 247 + .../etcd/clientv3/balancer/utils.go | 68 + vendor/go.etcd.io/etcd/clientv3/client.go | 664 + vendor/go.etcd.io/etcd/clientv3/cluster.go | 141 + vendor/go.etcd.io/etcd/clientv3/compact_op.go | 51 + vendor/go.etcd.io/etcd/clientv3/compare.go | 140 + vendor/go.etcd.io/etcd/clientv3/config.go | 88 + .../etcd/clientv3/credentials/credentials.go | 173 + vendor/go.etcd.io/etcd/clientv3/ctx.go | 64 + vendor/go.etcd.io/etcd/clientv3/doc.go | 106 + vendor/go.etcd.io/etcd/clientv3/kv.go | 177 + vendor/go.etcd.io/etcd/clientv3/lease.go | 596 + vendor/go.etcd.io/etcd/clientv3/logger.go | 101 + .../go.etcd.io/etcd/clientv3/maintenance.go | 243 + vendor/go.etcd.io/etcd/clientv3/op.go | 560 + vendor/go.etcd.io/etcd/clientv3/options.go | 65 + vendor/go.etcd.io/etcd/clientv3/retry.go | 298 + .../etcd/clientv3/retry_interceptor.go | 392 + vendor/go.etcd.io/etcd/clientv3/sort.go | 37 + vendor/go.etcd.io/etcd/clientv3/txn.go | 151 + vendor/go.etcd.io/etcd/clientv3/utils.go | 49 + vendor/go.etcd.io/etcd/clientv3/watch.go | 1035 + .../etcd/etcdserver/api/v3rpc/rpctypes/doc.go | 16 + .../etcdserver/api/v3rpc/rpctypes/error.go | 235 + .../etcd/etcdserver/api/v3rpc/rpctypes/md.go | 22 + .../api/v3rpc/rpctypes/metadatafields.go | 20 + .../etcdserver/etcdserverpb/etcdserver.pb.go | 1041 + .../etcdserver/etcdserverpb/etcdserver.proto | 34 + .../etcdserverpb/raft_internal.pb.go | 2127 ++ .../etcdserverpb/raft_internal.proto | 75 + .../etcdserverpb/raft_internal_stringer.go | 183 + .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 20088 ++++++++++++++++ .../etcd/etcdserver/etcdserverpb/rpc.proto | 1146 + vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go | 718 + vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto | 49 + .../go.etcd.io/etcd/pkg/fileutil/dir_unix.go | 27 + .../etcd/pkg/fileutil/dir_windows.go | 51 + vendor/go.etcd.io/etcd/pkg/fileutil/doc.go | 16 + .../go.etcd.io/etcd/pkg/fileutil/fileutil.go | 129 + vendor/go.etcd.io/etcd/pkg/fileutil/lock.go | 26 + .../etcd/pkg/fileutil/lock_flock.go | 49 + .../etcd/pkg/fileutil/lock_linux.go | 97 + .../etcd/pkg/fileutil/lock_plan9.go | 45 + .../etcd/pkg/fileutil/lock_solaris.go | 62 + .../go.etcd.io/etcd/pkg/fileutil/lock_unix.go | 29 + .../etcd/pkg/fileutil/lock_windows.go | 125 + .../etcd/pkg/fileutil/preallocate.go | 54 + .../etcd/pkg/fileutil/preallocate_darwin.go | 65 + .../etcd/pkg/fileutil/preallocate_unix.go | 49 + .../pkg/fileutil/preallocate_unsupported.go | 25 + vendor/go.etcd.io/etcd/pkg/fileutil/purge.go | 98 + .../go.etcd.io/etcd/pkg/fileutil/read_dir.go | 70 + vendor/go.etcd.io/etcd/pkg/fileutil/sync.go | 29 + .../etcd/pkg/fileutil/sync_darwin.go | 40 + 
.../etcd/pkg/fileutil/sync_linux.go | 34 + .../etcd/pkg/logutil/discard_logger.go | 46 + vendor/go.etcd.io/etcd/pkg/logutil/doc.go | 16 + .../go.etcd.io/etcd/pkg/logutil/log_level.go | 70 + vendor/go.etcd.io/etcd/pkg/logutil/logger.go | 64 + .../etcd/pkg/logutil/merge_logger.go | 194 + .../etcd/pkg/logutil/package_logger.go | 60 + vendor/go.etcd.io/etcd/pkg/logutil/zap.go | 91 + .../go.etcd.io/etcd/pkg/logutil/zap_grpc.go | 111 + .../etcd/pkg/logutil/zap_journal.go | 92 + .../go.etcd.io/etcd/pkg/logutil/zap_raft.go | 102 + vendor/go.etcd.io/etcd/pkg/systemd/doc.go | 16 + vendor/go.etcd.io/etcd/pkg/systemd/journal.go | 29 + .../etcd/pkg/tlsutil/cipher_suites.go | 51 + vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go | 16 + vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go | 73 + vendor/go.etcd.io/etcd/pkg/transport/doc.go | 17 + .../etcd/pkg/transport/keepalive_listener.go | 94 + .../etcd/pkg/transport/limit_listen.go | 80 + .../go.etcd.io/etcd/pkg/transport/listener.go | 449 + .../etcd/pkg/transport/listener_tls.go | 272 + .../etcd/pkg/transport/timeout_conn.go | 44 + .../etcd/pkg/transport/timeout_dialer.go | 36 + .../etcd/pkg/transport/timeout_listener.go | 57 + .../etcd/pkg/transport/timeout_transport.go | 51 + vendor/go.etcd.io/etcd/pkg/transport/tls.go | 49 + .../etcd/pkg/transport/transport.go | 71 + .../etcd/pkg/transport/unix_listener.go | 40 + vendor/go.etcd.io/etcd/pkg/types/doc.go | 17 + vendor/go.etcd.io/etcd/pkg/types/id.go | 39 + vendor/go.etcd.io/etcd/pkg/types/set.go | 195 + vendor/go.etcd.io/etcd/pkg/types/slice.go | 22 + vendor/go.etcd.io/etcd/pkg/types/urls.go | 82 + vendor/go.etcd.io/etcd/pkg/types/urlsmap.go | 107 + vendor/go.etcd.io/etcd/raft/OWNERS | 19 + vendor/go.etcd.io/etcd/raft/README.md | 197 + vendor/go.etcd.io/etcd/raft/bootstrap.go | 80 + .../etcd/raft/confchange/confchange.go | 425 + .../etcd/raft/confchange/restore.go | 155 + vendor/go.etcd.io/etcd/raft/design.md | 57 + vendor/go.etcd.io/etcd/raft/doc.go | 300 + vendor/go.etcd.io/etcd/raft/log.go | 372 + vendor/go.etcd.io/etcd/raft/log_unstable.go | 157 + vendor/go.etcd.io/etcd/raft/logger.go | 132 + vendor/go.etcd.io/etcd/raft/node.go | 584 + vendor/go.etcd.io/etcd/raft/quorum/joint.go | 75 + .../go.etcd.io/etcd/raft/quorum/majority.go | 210 + vendor/go.etcd.io/etcd/raft/quorum/quorum.go | 58 + .../etcd/raft/quorum/voteresult_string.go | 26 + vendor/go.etcd.io/etcd/raft/raft.go | 1656 ++ .../go.etcd.io/etcd/raft/raftpb/confchange.go | 170 + .../go.etcd.io/etcd/raft/raftpb/confstate.go | 45 + vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go | 2646 ++ vendor/go.etcd.io/etcd/raft/raftpb/raft.proto | 177 + vendor/go.etcd.io/etcd/raft/rawnode.go | 239 + vendor/go.etcd.io/etcd/raft/read_only.go | 121 + vendor/go.etcd.io/etcd/raft/status.go | 106 + vendor/go.etcd.io/etcd/raft/storage.go | 273 + .../go.etcd.io/etcd/raft/tracker/inflights.go | 132 + .../go.etcd.io/etcd/raft/tracker/progress.go | 259 + vendor/go.etcd.io/etcd/raft/tracker/state.go | 42 + .../go.etcd.io/etcd/raft/tracker/tracker.go | 288 + vendor/go.etcd.io/etcd/raft/util.go | 233 + vendor/go.etcd.io/etcd/version/version.go | 56 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 752 + .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../golang.org/x/crypto/cryptobyte/builder.go | 337 + .../golang.org/x/crypto/cryptobyte/string.go | 161 + .../x/crypto/internal/subtle/aliasing.go | 32 + .../internal/subtle/aliasing_appengine.go | 35 + .../x/crypto/nacl/secretbox/secretbox.go | 173 + .../x/crypto/poly1305/bits_compat.go | 39 + .../x/crypto/poly1305/bits_go1.13.go | 21 + 
.../golang.org/x/crypto/poly1305/mac_noasm.go | 9 + .../golang.org/x/crypto/poly1305/poly1305.go | 99 + .../golang.org/x/crypto/poly1305/sum_amd64.go | 47 + .../golang.org/x/crypto/poly1305/sum_amd64.s | 108 + .../x/crypto/poly1305/sum_generic.go | 310 + .../x/crypto/poly1305/sum_ppc64le.go | 47 + .../x/crypto/poly1305/sum_ppc64le.s | 181 + .../golang.org/x/crypto/poly1305/sum_s390x.go | 75 + .../golang.org/x/crypto/poly1305/sum_s390x.s | 503 + .../x/crypto/salsa20/salsa/hsalsa20.go | 144 + .../x/crypto/salsa20/salsa/salsa208.go | 199 + .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 + .../x/crypto/salsa20/salsa/salsa20_amd64.s | 883 + .../x/crypto/salsa20/salsa/salsa20_noasm.go | 14 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 231 + .../x/net/internal/timeseries/timeseries.go | 525 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + vendor/golang.org/x/net/trace/trace.go | 1130 + vendor/golang.org/x/net/websocket/client.go | 106 + vendor/golang.org/x/net/websocket/dial.go | 24 + vendor/golang.org/x/net/websocket/hybi.go | 583 + vendor/golang.org/x/net/websocket/server.go | 113 + .../golang.org/x/net/websocket/websocket.go | 451 + vendor/golang.org/x/sync/AUTHORS | 3 + vendor/golang.org/x/sync/CONTRIBUTORS | 3 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + .../x/sync/singleflight/singleflight.go | 212 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 + vendor/golang.org/x/sys/cpu/byteorder.go | 65 + vendor/golang.org/x/sys/cpu/cpu.go | 287 + vendor/golang.org/x/sys/cpu/cpu_aix.go | 32 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 43 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 26 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 71 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 23 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 31 + .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 15 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 9 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 10 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 12 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 + vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 135 + vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 + vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 + vendor/google.golang.org/genproto/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 206 + vendor/google.golang.org/grpc/.travis.yml | 42 + vendor/google.golang.org/grpc/AUTHORS | 1 + 
.../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 62 + vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/MAINTAINERS.md | 27 + vendor/google.golang.org/grpc/Makefile | 63 + vendor/google.golang.org/grpc/README.md | 121 + .../grpc/attributes/attributes.go | 70 + vendor/google.golang.org/grpc/backoff.go | 58 + .../google.golang.org/grpc/backoff/backoff.go | 52 + vendor/google.golang.org/grpc/balancer.go | 391 + .../grpc/balancer/balancer.go | 454 + .../grpc/balancer/base/balancer.go | 278 + .../grpc/balancer/base/base.go | 93 + .../grpc/balancer/roundrobin/roundrobin.go | 81 + .../grpc/balancer_conn_wrappers.go | 271 + .../grpc/balancer_v1_wrapper.go | 334 + .../grpc_binarylog_v1/binarylog.pb.go | 900 + vendor/google.golang.org/grpc/call.go | 74 + vendor/google.golang.org/grpc/clientconn.go | 1568 ++ vendor/google.golang.org/grpc/codec.go | 50 + vendor/google.golang.org/grpc/codegen.sh | 17 + .../grpc/codes/code_string.go | 62 + vendor/google.golang.org/grpc/codes/codes.go | 198 + .../grpc/connectivity/connectivity.go | 73 + .../grpc/credentials/credentials.go | 251 + .../grpc/credentials/go12.go | 30 + .../grpc/credentials/internal/syscallconn.go | 61 + .../internal/syscallconn_appengine.go | 30 + .../google.golang.org/grpc/credentials/tls.go | 225 + vendor/google.golang.org/grpc/dialoptions.go | 594 + vendor/google.golang.org/grpc/doc.go | 24 + .../grpc/encoding/encoding.go | 122 + .../grpc/encoding/proto/proto.go | 110 + vendor/google.golang.org/grpc/go.mod | 16 + vendor/google.golang.org/grpc/go.sum | 53 + .../google.golang.org/grpc/grpclog/grpclog.go | 126 + .../google.golang.org/grpc/grpclog/logger.go | 85 + .../grpc/grpclog/loggerv2.go | 195 + vendor/google.golang.org/grpc/install_gae.sh | 6 + vendor/google.golang.org/grpc/interceptor.go | 77 + .../grpc/internal/backoff/backoff.go | 73 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 167 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 210 + .../grpc/internal/binarylog/method_logger.go | 423 + .../grpc/internal/binarylog/regenerate.sh | 33 + .../grpc/internal/binarylog/sink.go | 162 + .../grpc/internal/binarylog/util.go | 41 + .../grpc/internal/buffer/unbounded.go | 85 + .../grpc/internal/channelz/funcs.go | 727 + .../grpc/internal/channelz/types.go | 702 + .../grpc/internal/channelz/types_linux.go | 53 + .../grpc/internal/channelz/types_nonlinux.go | 44 + .../grpc/internal/channelz/util_linux.go | 39 + .../grpc/internal/channelz/util_nonlinux.go | 26 + .../grpc/internal/envconfig/envconfig.go | 38 + .../grpc/internal/grpcrand/grpcrand.go | 56 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/internal.go | 72 + .../internal/resolver/dns/dns_resolver.go | 441 + .../grpc/internal/resolver/dns/go113.go | 33 + .../resolver/passthrough/passthrough.go | 57 + .../grpc/internal/syscall/syscall_linux.go | 114 + .../grpc/internal/syscall/syscall_nonlinux.go | 73 + .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 926 + .../grpc/internal/transport/defaults.go | 49 + .../grpc/internal/transport/flowcontrol.go | 217 + .../grpc/internal/transport/handler_server.go | 435 + .../grpc/internal/transport/http2_client.go | 1454 ++ .../grpc/internal/transport/http2_server.go | 1253 + .../grpc/internal/transport/http_util.go | 677 + .../grpc/internal/transport/log.go | 44 + 
.../grpc/internal/transport/transport.go | 808 + .../grpc/keepalive/keepalive.go | 85 + .../grpc/metadata/metadata.go | 209 + .../grpc/naming/dns_resolver.go | 293 + .../google.golang.org/grpc/naming/naming.go | 68 + vendor/google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 229 + vendor/google.golang.org/grpc/pickfirst.go | 159 + vendor/google.golang.org/grpc/preloader.go | 64 + vendor/google.golang.org/grpc/proxy.go | 152 + .../grpc/resolver/dns/dns_resolver.go | 36 + .../grpc/resolver/passthrough/passthrough.go | 26 + .../grpc/resolver/resolver.go | 253 + .../grpc/resolver_conn_wrapper.go | 263 + vendor/google.golang.org/grpc/rpc_util.go | 887 + vendor/google.golang.org/grpc/server.go | 1548 ++ .../google.golang.org/grpc/service_config.go | 434 + .../grpc/serviceconfig/serviceconfig.go | 41 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 311 + .../google.golang.org/grpc/status/status.go | 228 + vendor/google.golang.org/grpc/stream.go | 1529 ++ vendor/google.golang.org/grpc/tap/tap.go | 51 + vendor/google.golang.org/grpc/trace.go | 123 + vendor/google.golang.org/grpc/version.go | 22 + vendor/google.golang.org/grpc/vet.sh | 159 + .../natefinch/lumberjack.v2/.gitignore | 23 + .../natefinch/lumberjack.v2/.travis.yml | 6 + .../gopkg.in/natefinch/lumberjack.v2/LICENSE | 21 + .../natefinch/lumberjack.v2/README.md | 179 + .../gopkg.in/natefinch/lumberjack.v2/chown.go | 11 + .../natefinch/lumberjack.v2/chown_linux.go | 19 + .../natefinch/lumberjack.v2/lumberjack.go | 541 + vendor/k8s.io/api/admission/v1/doc.go | 23 + .../k8s.io/api/admission/v1/generated.pb.go | 1792 ++ .../k8s.io/api/admission/v1/generated.proto | 167 + vendor/k8s.io/api/admission/v1/register.go | 51 + vendor/k8s.io/api/admission/v1/types.go | 169 + .../v1/types_swagger_doc_generated.go | 78 + .../api/admission/v1/zz_generated.deepcopy.go | 141 + vendor/k8s.io/api/admission/v1beta1/doc.go | 24 + .../api/admission/v1beta1/generated.pb.go | 1792 ++ .../api/admission/v1beta1/generated.proto | 167 + .../k8s.io/api/admission/v1beta1/register.go | 51 + vendor/k8s.io/api/admission/v1beta1/types.go | 174 + .../v1beta1/types_swagger_doc_generated.go | 78 + .../v1beta1/zz_generated.deepcopy.go | 141 + .../zz_generated.prerelease-lifecycle.go | 49 + .../apimachinery/pkg/api/equality/semantic.go | 49 + .../apimachinery/pkg/api/validation/doc.go | 18 + .../pkg/api/validation/generic.go | 86 + .../pkg/api/validation/objectmeta.go | 268 + .../pkg/api/validation/path/name.go | 68 + .../apis/meta/internalversion/scheme/doc.go | 17 + .../meta/internalversion/scheme/register.go | 39 + .../internalversion/validation/validation.go | 46 + .../pkg/apis/meta/v1/validation/validation.go | 262 + .../meta/v1beta1/validation/validation.go | 33 + .../k8s.io/apimachinery/pkg/util/rand/rand.go | 127 + .../k8s.io/apimachinery/pkg/util/uuid/uuid.go | 27 + .../apimachinery/pkg/util/waitgroup/doc.go | 19 + .../pkg/util/waitgroup/waitgroup.go | 57 + .../apiserver/pkg/admission/attributes.go | 211 + .../k8s.io/apiserver/pkg/admission/audit.go | 103 + .../k8s.io/apiserver/pkg/admission/chain.go | 70 + .../k8s.io/apiserver/pkg/admission/config.go | 175 + .../configuration/configuration_manager.go | 166 + .../configuration/mutating_webhook_manager.go | 106 + .../validating_webhook_manager.go | 104 + .../apiserver/pkg/admission/decorator.go | 39 + .../k8s.io/apiserver/pkg/admission/errors.go | 72 + .../k8s.io/apiserver/pkg/admission/handler.go | 79 + 
.../pkg/admission/initializer/initializer.go | 72 + .../pkg/admission/initializer/interfaces.go | 61 + .../apiserver/pkg/admission/interfaces.go | 172 + .../pkg/admission/metrics/metrics.go | 251 + .../plugin/namespace/lifecycle/admission.go | 232 + .../pkg/admission/plugin/webhook/accessors.go | 297 + .../config/apis/webhookadmission/doc.go | 19 + .../config/apis/webhookadmission/register.go | 53 + .../config/apis/webhookadmission/types.go | 29 + .../config/apis/webhookadmission/v1/doc.go | 23 + .../apis/webhookadmission/v1/register.go | 50 + .../config/apis/webhookadmission/v1/types.go | 29 + .../v1/zz_generated.conversion.go | 67 + .../v1/zz_generated.deepcopy.go | 50 + .../v1/zz_generated.defaults.go | 32 + .../apis/webhookadmission/v1alpha1/doc.go | 23 + .../webhookadmission/v1alpha1/register.go | 50 + .../apis/webhookadmission/v1alpha1/types.go | 29 + .../v1alpha1/zz_generated.conversion.go | 67 + .../v1alpha1/zz_generated.deepcopy.go | 50 + .../v1alpha1/zz_generated.defaults.go | 32 + .../webhookadmission/zz_generated.deepcopy.go | 50 + .../plugin/webhook/config/kubeconfig.go | 71 + .../admission/plugin/webhook/errors/doc.go | 18 + .../plugin/webhook/errors/statuserror.go | 63 + .../plugin/webhook/generic/conversion.go | 112 + .../plugin/webhook/generic/interfaces.go | 75 + .../plugin/webhook/generic/webhook.go | 230 + .../plugin/webhook/mutating/dispatcher.go | 430 + .../admission/plugin/webhook/mutating/doc.go | 19 + .../plugin/webhook/mutating/plugin.go | 76 + .../webhook/mutating/reinvocationcontext.go | 68 + .../admission/plugin/webhook/namespace/doc.go | 20 + .../plugin/webhook/namespace/matcher.go | 121 + .../admission/plugin/webhook/object/doc.go | 20 + .../plugin/webhook/object/matcher.go | 57 + .../plugin/webhook/request/admissionreview.go | 283 + .../admission/plugin/webhook/request/doc.go | 18 + .../admission/plugin/webhook/rules/rules.go | 129 + .../plugin/webhook/validating/dispatcher.go | 238 + .../plugin/webhook/validating/doc.go | 19 + .../plugin/webhook/validating/plugin.go | 67 + .../k8s.io/apiserver/pkg/admission/plugins.go | 208 + .../apiserver/pkg/admission/reinvocation.go | 64 + vendor/k8s.io/apiserver/pkg/admission/util.go | 47 + .../apiserver/pkg/apis/apiserver/doc.go | 21 + .../pkg/apis/apiserver/install/install.go | 43 + .../apiserver/pkg/apis/apiserver/register.go | 49 + .../apiserver/pkg/apis/apiserver/types.go | 149 + .../apiserver/pkg/apis/apiserver/v1/doc.go | 23 + .../pkg/apis/apiserver/v1/register.go | 52 + .../apiserver/pkg/apis/apiserver/v1/types.go | 50 + .../apiserver/v1/zz_generated.conversion.go | 103 + .../apiserver/v1/zz_generated.deepcopy.go | 78 + .../apiserver/v1/zz_generated.defaults.go | 32 + .../pkg/apis/apiserver/v1alpha1/doc.go | 23 + .../pkg/apis/apiserver/v1alpha1/register.go | 53 + .../pkg/apis/apiserver/v1alpha1/types.go | 149 + .../v1alpha1/zz_generated.conversion.go | 329 + .../v1alpha1/zz_generated.deepcopy.go | 227 + .../v1alpha1/zz_generated.defaults.go | 32 + .../pkg/apis/apiserver/v1beta1/doc.go | 23 + .../pkg/apis/apiserver/v1beta1/register.go | 52 + .../pkg/apis/apiserver/v1beta1/types.go | 120 + .../v1beta1/zz_generated.conversion.go | 265 + .../v1beta1/zz_generated.deepcopy.go | 174 + .../v1beta1/zz_generated.defaults.go | 32 + .../apis/apiserver/zz_generated.deepcopy.go | 227 + vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS | 9 + vendor/k8s.io/apiserver/pkg/apis/audit/doc.go | 20 + .../apiserver/pkg/apis/audit/helpers.go | 38 + .../pkg/apis/audit/install/install.go | 37 + .../apiserver/pkg/apis/audit/register.go | 53 + 
.../k8s.io/apiserver/pkg/apis/audit/types.go | 286 + .../k8s.io/apiserver/pkg/apis/audit/v1/doc.go | 25 + .../pkg/apis/audit/v1/generated.pb.go | 3184 +++ .../pkg/apis/audit/v1/generated.proto | 249 + .../apiserver/pkg/apis/audit/v1/register.go | 58 + .../apiserver/pkg/apis/audit/v1/types.go | 280 + .../apis/audit/v1/zz_generated.conversion.go | 322 + .../apis/audit/v1/zz_generated.deepcopy.go | 291 + .../apis/audit/v1/zz_generated.defaults.go | 32 + .../pkg/apis/audit/v1alpha1/conversion.go | 78 + .../apiserver/pkg/apis/audit/v1alpha1/doc.go | 25 + .../pkg/apis/audit/v1alpha1/generated.pb.go | 3241 +++ .../pkg/apis/audit/v1alpha1/generated.proto | 250 + .../pkg/apis/audit/v1alpha1/register.go | 58 + .../pkg/apis/audit/v1alpha1/types.go | 287 + .../audit/v1alpha1/zz_generated.conversion.go | 339 + .../audit/v1alpha1/zz_generated.deepcopy.go | 293 + .../audit/v1alpha1/zz_generated.defaults.go | 32 + .../pkg/apis/audit/v1beta1/conversion.go | 45 + .../apiserver/pkg/apis/audit/v1beta1/doc.go | 25 + .../pkg/apis/audit/v1beta1/generated.pb.go | 3282 +++ .../pkg/apis/audit/v1beta1/generated.proto | 259 + .../pkg/apis/audit/v1beta1/register.go | 58 + .../apiserver/pkg/apis/audit/v1beta1/types.go | 288 + .../audit/v1beta1/zz_generated.conversion.go | 334 + .../audit/v1beta1/zz_generated.deepcopy.go | 293 + .../audit/v1beta1/zz_generated.defaults.go | 32 + .../pkg/apis/audit/validation/validation.go | 133 + .../pkg/apis/audit/zz_generated.deepcopy.go | 291 + .../k8s.io/apiserver/pkg/apis/config/doc.go | 19 + .../apiserver/pkg/apis/config/register.go | 53 + .../k8s.io/apiserver/pkg/apis/config/types.go | 100 + .../apiserver/pkg/apis/config/v1/defaults.go | 44 + .../apiserver/pkg/apis/config/v1/doc.go | 23 + .../apiserver/pkg/apis/config/v1/register.go | 53 + .../apiserver/pkg/apis/config/v1/types.go | 100 + .../apis/config/v1/zz_generated.conversion.go | 296 + .../apis/config/v1/zz_generated.deepcopy.go | 227 + .../apis/config/v1/zz_generated.defaults.go | 45 + .../pkg/apis/config/validation/validation.go | 220 + .../pkg/apis/config/zz_generated.deepcopy.go | 227 + .../pkg/apis/flowcontrol/bootstrap/default.go | 475 + vendor/k8s.io/apiserver/pkg/audit/OWNERS | 9 + vendor/k8s.io/apiserver/pkg/audit/context.go | 84 + vendor/k8s.io/apiserver/pkg/audit/format.go | 73 + vendor/k8s.io/apiserver/pkg/audit/metrics.go | 110 + .../apiserver/pkg/audit/policy/checker.go | 219 + .../apiserver/pkg/audit/policy/enforce.go | 53 + .../apiserver/pkg/audit/policy/reader.go | 90 + .../k8s.io/apiserver/pkg/audit/policy/util.go | 68 + vendor/k8s.io/apiserver/pkg/audit/request.go | 246 + vendor/k8s.io/apiserver/pkg/audit/scheme.go | 42 + vendor/k8s.io/apiserver/pkg/audit/types.go | 46 + vendor/k8s.io/apiserver/pkg/audit/union.go | 70 + .../authenticator/audagnostic.go | 90 + .../authentication/authenticator/audiences.go | 63 + .../authenticator/interfaces.go | 65 + .../authenticatorfactory/delegating.go | 120 + .../authenticatorfactory/loopback.go | 29 + .../authenticatorfactory/requestheader.go | 48 + .../group/authenticated_group_adder.go | 61 + .../pkg/authentication/group/group_adder.go | 51 + .../authentication/group/token_group_adder.go | 51 + .../request/anonymous/anonymous.go | 43 + .../request/bearertoken/bearertoken.go | 67 + .../request/headerrequest/requestheader.go | 239 + .../headerrequest/requestheader_controller.go | 337 + .../pkg/authentication/request/union/union.go | 71 + .../request/websocket/protocol.go | 108 + .../pkg/authentication/request/x509/OWNERS | 9 + .../pkg/authentication/request/x509/doc.go 
| 19 + .../request/x509/verify_options.go | 71 + .../pkg/authentication/request/x509/x509.go | 258 + .../pkg/authentication/serviceaccount/util.go | 183 + .../token/cache/cache_simple.go | 49 + .../token/cache/cache_striped.go | 60 + .../token/cache/cached_token_authenticator.go | 272 + .../pkg/authentication/token/cache/stats.go | 125 + .../token/tokenfile/tokenfile.go | 99 + .../apiserver/pkg/authentication/user/doc.go | 19 + .../apiserver/pkg/authentication/user/user.go | 84 + .../authorization/authorizer/interfaces.go | 159 + .../pkg/authorization/authorizer/rule.go | 73 + .../authorization/authorizerfactory/OWNERS | 5 + .../authorizerfactory/builtin.go | 95 + .../authorizerfactory/delegating.go | 58 + .../apiserver/pkg/authorization/path/doc.go | 18 + .../apiserver/pkg/authorization/path/path.go | 67 + .../pkg/authorization/union/union.go | 106 + .../pkg/endpoints/deprecation/deprecation.go | 133 + .../pkg/endpoints/discovery/addresses.go | 72 + .../pkg/endpoints/discovery/group.go | 73 + .../pkg/endpoints/discovery/legacy.go | 76 + .../apiserver/pkg/endpoints/discovery/root.go | 135 + .../endpoints/discovery/storageversionhash.go | 40 + .../apiserver/pkg/endpoints/discovery/util.go | 110 + .../pkg/endpoints/discovery/version.go | 83 + vendor/k8s.io/apiserver/pkg/endpoints/doc.go | 18 + .../endpoints/filterlatency/filterlatency.go | 96 + .../apiserver/pkg/endpoints/filters/OWNERS | 6 + .../apiserver/pkg/endpoints/filters/audit.go | 256 + .../endpoints/filters/audit_annotations.go | 39 + .../pkg/endpoints/filters/authentication.go | 94 + .../pkg/endpoints/filters/authn_audit.go | 86 + .../pkg/endpoints/filters/authorization.go | 106 + .../pkg/endpoints/filters/cachecontrol.go | 33 + .../apiserver/pkg/endpoints/filters/doc.go | 21 + .../pkg/endpoints/filters/impersonation.go | 239 + .../pkg/endpoints/filters/metrics.go | 115 + .../filters/request_received_time.go | 40 + .../pkg/endpoints/filters/requestinfo.go | 41 + .../pkg/endpoints/filters/storageversion.go | 121 + .../pkg/endpoints/filters/warning.go | 133 + .../apiserver/pkg/endpoints/groupversion.go | 137 + .../pkg/endpoints/handlers/create.go | 247 + .../pkg/endpoints/handlers/delete.go | 290 + .../apiserver/pkg/endpoints/handlers/doc.go | 18 + .../endpoints/handlers/fieldmanager/OWNERS | 5 + .../handlers/fieldmanager/buildmanagerinfo.go | 72 + .../handlers/fieldmanager/capmanagers.go | 134 + .../handlers/fieldmanager/endpoints.yaml | 7018 ++++++ .../handlers/fieldmanager/fieldmanager.go | 243 + .../fieldmanager/internal/atmostevery.go | 60 + .../fieldmanager/internal/conflict.go | 85 + .../handlers/fieldmanager/internal/fields.go | 47 + .../fieldmanager/internal/gvkparser.go | 127 + .../fieldmanager/internal/managedfields.go | 244 + .../fieldmanager/internal/pathelement.go | 140 + .../fieldmanager/lastappliedmanager.go | 172 + .../fieldmanager/lastappliedupdater.go | 117 + .../fieldmanager/managedfieldsupdater.go | 86 + .../endpoints/handlers/fieldmanager/node.yaml | 259 + .../endpoints/handlers/fieldmanager/pod.yaml | 121 + .../handlers/fieldmanager/skipnonapplied.go | 91 + .../handlers/fieldmanager/stripmeta.go | 90 + .../handlers/fieldmanager/structuredmerge.go | 171 + .../handlers/fieldmanager/typeconverter.go | 130 + .../handlers/fieldmanager/versionconverter.go | 101 + .../apiserver/pkg/endpoints/handlers/get.go | 287 + .../pkg/endpoints/handlers/helpers.go | 60 + .../apiserver/pkg/endpoints/handlers/namer.go | 155 + .../pkg/endpoints/handlers/negotiation/doc.go | 18 + .../endpoints/handlers/negotiation/errors.go | 99 + 
.../handlers/negotiation/negotiate.go | 263 + .../apiserver/pkg/endpoints/handlers/patch.go | 680 + .../pkg/endpoints/handlers/response.go | 275 + .../endpoints/handlers/responsewriters/doc.go | 18 + .../handlers/responsewriters/errors.go | 78 + .../handlers/responsewriters/status.go | 83 + .../handlers/responsewriters/writers.go | 286 + .../apiserver/pkg/endpoints/handlers/rest.go | 529 + .../pkg/endpoints/handlers/update.go | 269 + .../apiserver/pkg/endpoints/handlers/watch.go | 337 + .../apiserver/pkg/endpoints/installer.go | 1246 + .../apiserver/pkg/endpoints/metrics/OWNERS | 5 + .../pkg/endpoints/metrics/metrics.go | 707 + .../apiserver/pkg/endpoints/openapi/OWNERS | 4 + .../pkg/endpoints/openapi/openapi.go | 191 + .../apiserver/pkg/endpoints/request/OWNERS | 4 + .../pkg/endpoints/request/context.go | 96 + .../apiserver/pkg/endpoints/request/doc.go | 20 + .../pkg/endpoints/request/received_time.go | 45 + .../pkg/endpoints/request/requestinfo.go | 274 + .../pkg/endpoints/warning/warning.go | 39 + vendor/k8s.io/apiserver/pkg/features/OWNERS | 4 + .../apiserver/pkg/features/kube_features.go | 190 + vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS | 13 + .../apiserver/pkg/quota/v1/interfaces.go | 88 + .../apiserver/pkg/quota/v1/resources.go | 293 + .../apiserver/pkg/registry/generic/OWNERS | 28 + .../apiserver/pkg/registry/generic/doc.go | 19 + .../apiserver/pkg/registry/generic/matcher.go | 52 + .../apiserver/pkg/registry/generic/options.go | 54 + .../generic/registry/decorated_watcher.go | 102 + .../pkg/registry/generic/registry/doc.go | 19 + .../pkg/registry/generic/registry/dryrun.go | 121 + .../generic/registry/storage_factory.go | 137 + .../pkg/registry/generic/registry/store.go | 1412 ++ .../pkg/registry/generic/storage_decorator.go | 58 + .../k8s.io/apiserver/pkg/registry/rest/OWNERS | 23 + .../apiserver/pkg/registry/rest/create.go | 192 + .../pkg/registry/rest/create_update.go | 52 + .../apiserver/pkg/registry/rest/delete.go | 183 + .../k8s.io/apiserver/pkg/registry/rest/doc.go | 18 + .../apiserver/pkg/registry/rest/export.go | 34 + .../apiserver/pkg/registry/rest/meta.go | 43 + .../apiserver/pkg/registry/rest/rest.go | 351 + .../apiserver/pkg/registry/rest/table.go | 107 + .../apiserver/pkg/registry/rest/update.go | 279 + vendor/k8s.io/apiserver/pkg/server/config.go | 854 + .../apiserver/pkg/server/config_selfclient.go | 97 + .../pkg/server/deprecated_insecure_serving.go | 94 + vendor/k8s.io/apiserver/pkg/server/doc.go | 18 + .../server/dynamiccertificates/cert_key.go | 74 + .../server/dynamiccertificates/client_ca.go | 81 + .../configmap_cafile_content.go | 274 + .../dynamic_cafile_content.go | 255 + .../dynamic_serving_content.go | 180 + .../dynamic_sni_content.go | 50 + .../dynamiccertificates/named_certificates.go | 93 + .../dynamiccertificates/static_content.go | 114 + .../server/dynamiccertificates/tlsconfig.go | 284 + .../dynamiccertificates/union_content.go | 107 + .../pkg/server/dynamiccertificates/util.go | 68 + .../pkg/server/egressselector/config.go | 261 + .../server/egressselector/egress_selector.go | 373 + .../server/egressselector/metrics/metrics.go | 114 + .../apiserver/pkg/server/filters/OWNERS | 5 + .../pkg/server/filters/content_type.go | 28 + .../apiserver/pkg/server/filters/cors.go | 98 + .../apiserver/pkg/server/filters/doc.go | 19 + .../apiserver/pkg/server/filters/goaway.go | 84 + .../pkg/server/filters/longrunning.go | 41 + .../pkg/server/filters/maxinflight.go | 220 + .../server/filters/priority-and-fairness.go | 155 + 
.../apiserver/pkg/server/filters/timeout.go | 314 + .../apiserver/pkg/server/filters/waitgroup.go | 61 + .../apiserver/pkg/server/filters/wrap.go | 72 + .../apiserver/pkg/server/genericapiserver.go | 632 + vendor/k8s.io/apiserver/pkg/server/handler.go | 190 + vendor/k8s.io/apiserver/pkg/server/healthz.go | 159 + .../apiserver/pkg/server/healthz/doc.go | 21 + .../apiserver/pkg/server/healthz/healthz.go | 291 + vendor/k8s.io/apiserver/pkg/server/hooks.go | 244 + .../apiserver/pkg/server/httplog/doc.go | 19 + .../apiserver/pkg/server/httplog/httplog.go | 243 + vendor/k8s.io/apiserver/pkg/server/mux/OWNERS | 4 + vendor/k8s.io/apiserver/pkg/server/mux/doc.go | 18 + .../apiserver/pkg/server/mux/pathrecorder.go | 278 + .../apiserver/pkg/server/options/OWNERS | 16 + .../apiserver/pkg/server/options/admission.go | 234 + .../pkg/server/options/api_enablement.go | 115 + .../apiserver/pkg/server/options/audit.go | 607 + .../pkg/server/options/authentication.go | 421 + .../authentication_dynamic_request_header.go | 79 + .../pkg/server/options/authorization.go | 220 + .../apiserver/pkg/server/options/coreapi.go | 84 + .../options/deprecated_insecure_serving.go | 169 + .../apiserver/pkg/server/options/doc.go | 21 + .../pkg/server/options/egress_selector.go | 93 + .../server/options/encryptionconfig/OWNERS | 9 + .../server/options/encryptionconfig/config.go | 375 + .../apiserver/pkg/server/options/etcd.go | 329 + .../apiserver/pkg/server/options/feature.go | 72 + .../pkg/server/options/recommended.go | 150 + .../pkg/server/options/server_run_options.go | 215 + .../apiserver/pkg/server/options/serving.go | 356 + .../pkg/server/options/serving_unix.go | 31 + .../pkg/server/options/serving_windows.go | 30 + .../server/options/serving_with_loopback.go | 79 + vendor/k8s.io/apiserver/pkg/server/plugins.go | 32 + .../pkg/server/resourceconfig/doc.go | 18 + .../pkg/server/resourceconfig/helpers.go | 201 + .../k8s.io/apiserver/pkg/server/routes/OWNERS | 4 + .../k8s.io/apiserver/pkg/server/routes/doc.go | 18 + .../apiserver/pkg/server/routes/flags.go | 127 + .../apiserver/pkg/server/routes/index.go | 69 + .../apiserver/pkg/server/routes/metrics.go | 51 + .../apiserver/pkg/server/routes/openapi.go | 53 + .../apiserver/pkg/server/routes/profiling.go | 43 + .../apiserver/pkg/server/routes/version.go | 57 + .../apiserver/pkg/server/secure_serving.go | 289 + vendor/k8s.io/apiserver/pkg/server/signal.go | 69 + .../apiserver/pkg/server/signal_posix.go | 26 + .../apiserver/pkg/server/signal_windows.go | 23 + .../apiserver/pkg/server/storage/doc.go | 18 + .../pkg/server/storage/resource_config.go | 124 + .../storage/resource_encoding_config.go | 84 + .../pkg/server/storage/storage_codec.go | 98 + .../pkg/server/storage/storage_factory.go | 350 + vendor/k8s.io/apiserver/pkg/storage/OWNERS | 26 + .../apiserver/pkg/storage/cacher/cacher.go | 1441 ++ .../pkg/storage/cacher/caching_object.go | 397 + .../apiserver/pkg/storage/cacher/metrics.go | 74 + .../pkg/storage/cacher/time_budget.go | 100 + .../apiserver/pkg/storage/cacher/util.go | 60 + .../pkg/storage/cacher/watch_cache.go | 631 + vendor/k8s.io/apiserver/pkg/storage/doc.go | 18 + vendor/k8s.io/apiserver/pkg/storage/errors.go | 195 + .../apiserver/pkg/storage/errors/doc.go | 18 + .../apiserver/pkg/storage/errors/storage.go | 116 + .../k8s.io/apiserver/pkg/storage/etcd3/OWNERS | 7 + .../pkg/storage/etcd3/api_object_versioner.go | 131 + .../apiserver/pkg/storage/etcd3/compact.go | 162 + .../apiserver/pkg/storage/etcd3/errors.go | 71 + .../apiserver/pkg/storage/etcd3/event.go 
| 71 + .../pkg/storage/etcd3/healthcheck.go | 40 + .../pkg/storage/etcd3/lease_manager.go | 102 + .../apiserver/pkg/storage/etcd3/logger.go | 84 + .../pkg/storage/etcd3/metrics/metrics.go | 115 + .../apiserver/pkg/storage/etcd3/store.go | 939 + .../apiserver/pkg/storage/etcd3/watcher.go | 468 + .../apiserver/pkg/storage/interfaces.go | 275 + .../apiserver/pkg/storage/names/generate.go | 54 + .../pkg/storage/selection_predicate.go | 159 + .../pkg/storage/storagebackend/OWNERS | 8 + .../pkg/storage/storagebackend/config.go | 91 + .../storage/storagebackend/factory/etcd3.go | 291 + .../storage/storagebackend/factory/factory.go | 52 + vendor/k8s.io/apiserver/pkg/storage/util.go | 81 + .../pkg/storage/value/encrypt/aes/aes.go | 152 + .../value/encrypt/envelope/envelope.go | 200 + .../value/encrypt/envelope/grpc_service.go | 181 + .../storage/value/encrypt/envelope/metrics.go | 102 + .../encrypt/envelope/v1beta1/service.pb.go | 502 + .../encrypt/envelope/v1beta1/service.proto | 70 + .../value/encrypt/envelope/v1beta1/v1beta1.go | 23 + .../value/encrypt/identity/identity.go | 50 + .../value/encrypt/secretbox/secretbox.go | 69 + .../apiserver/pkg/storage/value/metrics.go | 141 + .../pkg/storage/value/transformer.go | 209 + .../apiserver/pkg/storageversion/OWNERS | 5 + .../apiserver/pkg/storageversion/manager.go | 277 + .../apiserver/pkg/storageversion/updater.go | 128 + .../apiserver/pkg/util/apihelpers/helpers.go | 100 + .../apiserver/pkg/util/dryrun/dryrun.go | 22 + .../pkg/util/flowcontrol/apf_controller.go | 764 + .../util/flowcontrol/apf_controller_debug.go | 268 + .../pkg/util/flowcontrol/apf_filter.go | 132 + .../pkg/util/flowcontrol/counter/interface.go | 33 + .../pkg/util/flowcontrol/counter/noop.go | 25 + .../pkg/util/flowcontrol/debug/dump.go | 48 + .../flowcontrol/fairqueuing/integrator.go | 180 + .../util/flowcontrol/fairqueuing/interface.go | 135 + .../fairqueuing/promise/interface.go | 129 + .../promise/lockingpromise/lockingpromise.go | 124 + .../flowcontrol/fairqueuing/queueset/doc.go | 120 + .../fairqueuing/queueset/queueset.go | 781 + .../flowcontrol/fairqueuing/queueset/types.go | 128 + .../pkg/util/flowcontrol/format/formatting.go | 231 + .../pkg/util/flowcontrol/formatting.go | 40 + .../pkg/util/flowcontrol/metrics/metrics.go | 261 + .../metrics/sample_and_watermark.go | 211 + .../flowcontrol/metrics/timed_observer.go | 52 + .../apiserver/pkg/util/flowcontrol/rule.go | 203 + .../apiserver/pkg/util/flushwriter/doc.go | 19 + .../apiserver/pkg/util/flushwriter/writer.go | 53 + .../apiserver/pkg/util/openapi/proto.go | 54 + .../util/shufflesharding/shufflesharding.go | 107 + .../pkg/util/webhook/authentication.go | 263 + .../apiserver/pkg/util/webhook/client.go | 224 + .../apiserver/pkg/util/webhook/error.go | 47 + .../apiserver/pkg/util/webhook/gencerts.sh | 107 + .../pkg/util/webhook/serviceresolver.go | 47 + .../apiserver/pkg/util/webhook/validation.go | 105 + .../apiserver/pkg/util/webhook/webhook.go | 162 + .../apiserver/pkg/util/wsstream/conn.go | 352 + .../k8s.io/apiserver/pkg/util/wsstream/doc.go | 21 + .../apiserver/pkg/util/wsstream/stream.go | 177 + .../k8s.io/apiserver/pkg/warning/context.go | 59 + .../plugin/pkg/audit/buffered/buffered.go | 290 + .../plugin/pkg/audit/buffered/doc.go | 19 + .../apiserver/plugin/pkg/audit/log/backend.go | 104 + .../plugin/pkg/audit/truncate/doc.go | 19 + .../plugin/pkg/audit/truncate/truncate.go | 160 + .../plugin/pkg/audit/webhook/webhook.go | 139 + .../authenticator/token/webhook/webhook.go | 257 + 
.../plugin/pkg/authorizer/webhook/gencerts.sh | 102 + .../plugin/pkg/authorizer/webhook/webhook.go | 386 + vendor/k8s.io/client-go/tools/events/OWNERS | 8 + vendor/k8s.io/client-go/tools/events/doc.go | 19 + .../tools/events/event_broadcaster.go | 384 + .../client-go/tools/events/event_recorder.go | 88 + vendor/k8s.io/client-go/tools/events/fake.go | 45 + .../client-go/tools/events/interfaces.go | 90 + .../metrics/prometheus/workqueue/metrics.go | 130 + .../metrics/testutil/metrics.go | 368 + .../metrics/testutil/promlint.go | 156 + .../metrics/testutil/testutil.go | 86 + vendor/k8s.io/kube-openapi/pkg/builder/doc.go | 20 + .../kube-openapi/pkg/builder/openapi.go | 445 + .../k8s.io/kube-openapi/pkg/builder/util.go | 61 + .../pkg/handler/default_pruning.go | 208 + .../kube-openapi/pkg/handler/handler.go | 267 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 452 + vendor/k8s.io/kube-openapi/pkg/util/trie.go | 79 + vendor/k8s.io/kube-openapi/pkg/util/util.go | 105 + vendor/k8s.io/utils/net/ipnet.go | 221 + vendor/k8s.io/utils/net/net.go | 213 + vendor/k8s.io/utils/net/port.go | 137 + vendor/k8s.io/utils/path/file.go | 78 + vendor/modules.txt | 247 + .../konnectivity-client/LICENSE | 201 + .../konnectivity-client/pkg/client/client.go | 205 + .../konnectivity-client/pkg/client/conn.go | 141 + .../proto/client/client.pb.go | 653 + .../proto/client/client.proto | 95 + .../structured-merge-diff/v4/fieldpath/doc.go | 21 + .../v4/fieldpath/element.go | 317 + .../v4/fieldpath/fromvalue.go | 134 + .../v4/fieldpath/managers.go | 144 + .../v4/fieldpath/path.go | 118 + .../v4/fieldpath/pathelementmap.go | 85 + .../v4/fieldpath/serialize-pe.go | 168 + .../v4/fieldpath/serialize.go | 238 + .../structured-merge-diff/v4/fieldpath/set.go | 406 + .../v4/merge/conflict.go | 121 + .../structured-merge-diff/v4/merge/update.go | 329 + .../structured-merge-diff/v4/schema/doc.go | 28 + .../v4/schema/elements.go | 261 + .../structured-merge-diff/v4/schema/equals.go | 199 + .../v4/schema/schemaschema.go | 161 + .../structured-merge-diff/v4/typed/doc.go | 18 + .../structured-merge-diff/v4/typed/helpers.go | 256 + .../structured-merge-diff/v4/typed/merge.go | 353 + .../structured-merge-diff/v4/typed/parser.go | 151 + .../v4/typed/reconcile_schema.go | 295 + .../structured-merge-diff/v4/typed/remove.go | 112 + .../v4/typed/tofieldset.go | 166 + .../structured-merge-diff/v4/typed/typed.go | 315 + .../structured-merge-diff/v4/typed/union.go | 276 + .../v4/typed/validate.go | 195 + 927 files changed, 189381 insertions(+), 6 deletions(-) create mode 100644 vendor/github.com/NYTimes/gziphandler/.gitignore create mode 100644 vendor/github.com/NYTimes/gziphandler/.travis.yml create mode 100644 vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md create mode 100644 vendor/github.com/NYTimes/gziphandler/LICENSE.md create mode 100644 vendor/github.com/NYTimes/gziphandler/README.md create mode 100644 vendor/github.com/NYTimes/gziphandler/gzip.go create mode 100644 vendor/github.com/NYTimes/gziphandler/gzip_go18.go create mode 100644 vendor/github.com/coreos/go-semver/LICENSE create mode 100644 vendor/github.com/coreos/go-semver/NOTICE create mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go create mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/NOTICE create mode 100644 
vendor/github.com/coreos/go-systemd/daemon/sdnotify.go create mode 100644 vendor/github.com/coreos/go-systemd/daemon/watchdog.go create mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/go.mod create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go create mode 100644 
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go create mode 100644 vendor/github.com/munnerz/goautoneg/LICENSE create mode 100644 vendor/github.com/munnerz/goautoneg/Makefile create mode 100644 vendor/github.com/munnerz/goautoneg/README.txt create mode 100644 vendor/github.com/munnerz/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go create mode 100644 vendor/go.etcd.io/etcd/LICENSE create mode 100644 vendor/go.etcd.io/etcd/NOTICE create mode 100644 vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go create mode 100644 vendor/go.etcd.io/etcd/auth/authpb/auth.proto create mode 100644 vendor/go.etcd.io/etcd/clientv3/README.md create mode 100644 vendor/go.etcd.io/etcd/clientv3/auth.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/utils.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/client.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/cluster.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/compact_op.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/compare.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/config.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/ctx.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/doc.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/kv.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/lease.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/logger.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/maintenance.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/op.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/options.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/retry.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/sort.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/txn.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/utils.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/watch.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go create mode 100644 
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto create mode 100644 vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go create mode 100644 vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/purge.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/log_level.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go create mode 100644 vendor/go.etcd.io/etcd/pkg/systemd/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/systemd/journal.go create mode 100644 vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go create mode 100644 vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go create mode 100644 
vendor/go.etcd.io/etcd/pkg/transport/listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/tls.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/transport.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/id.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/set.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/slice.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/urls.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/urlsmap.go create mode 100644 vendor/go.etcd.io/etcd/raft/OWNERS create mode 100644 vendor/go.etcd.io/etcd/raft/README.md create mode 100644 vendor/go.etcd.io/etcd/raft/bootstrap.go create mode 100644 vendor/go.etcd.io/etcd/raft/confchange/confchange.go create mode 100644 vendor/go.etcd.io/etcd/raft/confchange/restore.go create mode 100644 vendor/go.etcd.io/etcd/raft/design.md create mode 100644 vendor/go.etcd.io/etcd/raft/doc.go create mode 100644 vendor/go.etcd.io/etcd/raft/log.go create mode 100644 vendor/go.etcd.io/etcd/raft/log_unstable.go create mode 100644 vendor/go.etcd.io/etcd/raft/logger.go create mode 100644 vendor/go.etcd.io/etcd/raft/node.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/joint.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/majority.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/quorum.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go create mode 100644 vendor/go.etcd.io/etcd/raft/raft.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/confchange.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/confstate.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/raft.proto create mode 100644 vendor/go.etcd.io/etcd/raft/rawnode.go create mode 100644 vendor/go.etcd.io/etcd/raft/read_only.go create mode 100644 vendor/go.etcd.io/etcd/raft/status.go create mode 100644 vendor/go.etcd.io/etcd/raft/storage.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/inflights.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/progress.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/state.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/tracker.go create mode 100644 vendor/go.etcd.io/etcd/raft/util.go create mode 100644 vendor/go.etcd.io/etcd/version/version.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_compat.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_go1.13.go create mode 100644 vendor/golang.org/x/crypto/poly1305/mac_noasm.go 
create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_generic.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/net/websocket/client.go create mode 100644 vendor/golang.org/x/net/websocket/dial.go create mode 100644 vendor/golang.org/x/net/websocket/hybi.go create mode 100644 vendor/golang.org/x/net/websocket/server.go create mode 100644 vendor/golang.org/x/net/websocket/websocket.go create mode 100644 vendor/golang.org/x/sync/AUTHORS create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go 
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/go12.go create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/go.mod create mode 100644 vendor/google.golang.org/grpc/go.sum create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 
vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 
vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/proxy.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go create mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/README.md create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go create mode 100644 vendor/k8s.io/api/admission/v1/doc.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.proto create mode 100644 vendor/k8s.io/api/admission/v1/register.go create mode 100644 vendor/k8s.io/api/admission/v1/types.go create mode 100644 vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/admission/v1beta1/register.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/generic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/rand.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/attributes.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/chain.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/decorator.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/handler.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go create mode 100644 
vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugins.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/reinvocation.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go 
create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go create mode 100644 
vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/audit/context.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/format.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/checker.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/reader.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/request.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/scheme.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/union.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go create mode 100644 
vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/path/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/path/path.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/union/union.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go create 
mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/installer.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/metrics/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/context.go create mode 100644 
vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go create mode 100644 vendor/k8s.io/apiserver/pkg/features/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go create mode 100644 vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/quota/v1/resources.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/options.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/create.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/delete.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/export.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/meta.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/rest.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/table.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/update.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/config_selfclient.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/egressselector/config.go create mode 100644 
vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/content_type.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/cors.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/goaway.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/timeout.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/wrap.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/genericapiserver.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/handler.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/hooks.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/admission.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authentication.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authorization.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/coreapi.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/etcd.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/feature.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/recommended.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/plugins.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go create mode 
100644 vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/flags.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/index.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/openapi.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/profiling.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/version.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/secure_serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/signal.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/signal_posix.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/signal_windows.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/storage.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/names/generate.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/util.go create mode 100644 
vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/transformer.go create mode 100644 vendor/k8s.io/apiserver/pkg/storageversion/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storageversion/manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/storageversion/updater.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/openapi/proto.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/client.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/error.go create mode 100644 
vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/validation.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go create mode 100644 vendor/k8s.io/apiserver/pkg/warning/context.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go create mode 100644 vendor/k8s.io/client-go/tools/events/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/events/doc.go create mode 100644 vendor/k8s.io/client-go/tools/events/event_broadcaster.go create mode 100644 vendor/k8s.io/client-go/tools/events/event_recorder.go create mode 100644 vendor/k8s.io/client-go/tools/events/fake.go create mode 100644 vendor/k8s.io/client-go/tools/events/interfaces.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go create mode 100644 vendor/k8s.io/component-base/metrics/testutil/metrics.go create mode 100644 vendor/k8s.io/component-base/metrics/testutil/promlint.go create mode 100644 vendor/k8s.io/component-base/metrics/testutil/testutil.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/util.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/handler/handler.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/trie.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/util.go create mode 100644 vendor/k8s.io/utils/net/ipnet.go create mode 100644 vendor/k8s.io/utils/net/net.go create mode 100644 vendor/k8s.io/utils/net/port.go create mode 100644 vendor/k8s.io/utils/path/file.go create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/LICENSE create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go create mode 100644 
vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go diff --git a/README.md b/README.md index f670b80192..6d653d7a1a 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,7 @@ Table of Contents * [Priority filtering](#priority-filtering) * [Pod Evictions](#pod-evictions) * [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb) + * [Metrics](#metrics) * [Compatibility Matrix](#compatibility-matrix) * [Getting Involved and Contributing](#getting-involved-and-contributing) * [Communicating With Contributors](#communicating-with-contributors) @@ -348,9 +349,9 @@ strategies: ### RemovePodsHavingTooManyRestarts -This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that -can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters -include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`, +This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that +can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters +include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored into that calculation. **Parameters:** @@ -521,6 +522,16 @@ Setting `--v=4` or greater on the Descheduler will log all reasons why any pod i Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods are evicted by using the eviction subresource to handle PDB. 
+## Metrics + +| name | type | description | +|-------|-------|----------------| +| build_info | gauge | constant 1 | +| pods_evicted | CounterVec | total number of pods evicted | + +The metrics are served through https://localhost:10258/metrics by default. +The address and port can be changed by setting `--bind-address` and `--secure-port` flags. + ## Compatibility Matrix The below compatibility matrix shows the k8s client package(client-go, apimachinery, etc) versions that descheduler is compiled with. At this time descheduler does not have a hard dependency to a specific k8s release. However a
diff --git a/cmd/descheduler/app/options/options.go b/cmd/descheduler/app/options/options.go index 0a0184ef88..2825a9367c 100644 --- a/cmd/descheduler/app/options/options.go +++ b/cmd/descheduler/app/options/options.go @@ -19,20 +19,29 @@ package options import ( "github.com/spf13/pflag" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + apiserveroptions "k8s.io/apiserver/pkg/server/options" clientset "k8s.io/client-go/kubernetes" - "k8s.io/component-base/logs" + "sigs.k8s.io/descheduler/pkg/apis/componentconfig" "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1" deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme" ) +const ( + DefaultDeschedulerPort = 10258 +) + // DeschedulerServer configuration type DeschedulerServer struct { componentconfig.DeschedulerConfiguration - Client clientset.Interface - Logs *logs.Options + + Client clientset.Interface + Logs *logs.Options + SecureServing *apiserveroptions.SecureServingOptionsWithLoopback + DisableMetrics bool } // NewDeschedulerServer creates a new DeschedulerServer with default parameters @@ -41,9 +50,14 @@ func NewDeschedulerServer() (*DeschedulerServer, error) { if err != nil { return nil, err } + + secureServing := apiserveroptions.NewSecureServingOptions().WithLoopback() + secureServing.BindPort = DefaultDeschedulerPort + return &DeschedulerServer{ DeschedulerConfiguration: *cfg, Logs: logs.NewOptions(), + SecureServing: secureServing, }, nil } @@ -77,4 +91,7 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) { fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler") // evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default. fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler") + fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. The secure serving address and port can be changed through the --bind-address and --secure-port flags.") + + rs.SecureServing.AddFlags(fs) }
diff --git a/cmd/descheduler/app/server.go b/cmd/descheduler/app/server.go index 0b47cff5fa..7d99e26d59 100644 --- a/cmd/descheduler/app/server.go +++ b/cmd/descheduler/app/server.go @@ -18,6 +18,7 @@ limitations under the License.
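A minimal sketch of how the pods_evicted counter listed in the metrics table above could be declared and registered against the component-base legacy registry that backs the /metrics handler. This is illustrative only, not the patch's actual code: the package name, label names ("result", "strategy"), and help text are assumptions.

	package metrics

	import (
		"k8s.io/component-base/metrics"
		"k8s.io/component-base/metrics/legacyregistry"
	)

	// PodsEvicted counts evicted pods, partitioned by hypothetical labels.
	var PodsEvicted = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem:      "descheduler",
			Name:           "pods_evicted",
			Help:           "Number of pods evicted by the descheduler.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"result", "strategy"},
	)

	func init() {
		// Registering with legacyregistry makes the counter visible to the
		// legacyregistry handler that the server mounts on /metrics.
		legacyregistry.MustRegister(PodsEvicted)
	}

An eviction code path would then increment it with, for example, PodsEvicted.WithLabelValues("success", "RemovePodsHavingTooManyRestarts").Inc().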
package app import ( + "context" "flag" "io" @@ -26,7 +27,11 @@ import ( "github.com/spf13/cobra" + apiserver "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/mux" + restclient "k8s.io/client-go/rest" aflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog/v2" ) @@ -46,9 +51,30 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command { s.Logs.LogFormat = s.Logging.Format s.Logs.Apply() + // LoopbackClientConfig is a config for a privileged loopback connection + var LoopbackClientConfig *restclient.Config + var SecureServing *apiserver.SecureServingInfo + if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil { + klog.ErrorS(err, "failed to apply secure server configuration") + return + } + if err := s.Validate(); err != nil { klog.ErrorS(err, "failed to validate server configuration") + return + } + + if !s.DisableMetrics { + ctx := context.TODO() + pathRecorderMux := mux.NewPathRecorderMux("descheduler") + pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset()) + + if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil { + klog.Fatalf("failed to start secure server: %v", err) + return + } } + err := Run(s) if err != nil { klog.ErrorS(err, "descheduler server") diff --git a/go.sum b/go.sum index 4a265c3f17..0d3e447a0b 100644 --- a/go.sum +++ b/go.sum @@ -40,8 +40,10 @@ github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -68,15 +70,19 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 
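As a rough local check of the secure metrics endpoint wired up in server.go above, a small client like the following could be used, assuming a descheduler running locally on the default --secure-port of 10258 with its self-signed serving certificate (which is why certificate verification is skipped for this local smoke test only):

	package main

	import (
		"crypto/tls"
		"fmt"
		"io"
		"net/http"
	)

	func main() {
		// Skip certificate verification only because the default serving
		// certificate is self-signed; never do this outside a local test.
		client := &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
		}

		resp, err := client.Get("https://localhost:10258/metrics")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		body, err := io.ReadAll(resp.Body)
		if err != nil {
			panic(err)
		}
		// Expect Prometheus text exposition output, including build_info and,
		// once evictions have occurred, pods_evicted samples.
		fmt.Printf("%s\n", body)
	}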
h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -88,6 +94,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -166,6 +173,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -196,10 +204,14 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -212,6 +224,7 @@ github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -225,6 +238,7 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -256,6 +270,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= @@ -298,9 +313,12 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
+github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -323,15 +341,19 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -427,7 +449,9 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -561,6 +585,7 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -568,6 +593,7 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -593,6 +619,7 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -645,6 +672,7 @@ k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14 h1:TihvEz9MPj2u0KWds6E2OBUXfwaL4qRJ33c7HGiJpqk= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/vendor/github.com/NYTimes/gziphandler/.gitignore b/vendor/github.com/NYTimes/gziphandler/.gitignore new file mode 100644 index 0000000000..1377554ebe --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/NYTimes/gziphandler/.travis.yml b/vendor/github.com/NYTimes/gziphandler/.travis.yml new file mode 100644 index 
0000000000..d2b67f69c1 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.7 + - 1.8 + - tip diff --git a/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..cdbca194c3 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +--- +layout: code-of-conduct +version: v1.0 +--- + +This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community. + +Our open source community strives to: + +* **Be friendly and patient.** +* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability. +* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language. +* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one. +* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable. +* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes. + +## Definitions + +Harassment includes, but is not limited to: + +- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation +- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment +- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. 
You must address people by the name they give you when not addressing them by their username or handle +- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop +- Threats of violence, both physical and psychological +- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm +- Deliberate intimidation +- Stalking or following +- Harassing photography or recording, including logging online activity for harassment purposes +- Sustained disruption of discussion +- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour +- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others +- Continued one-on-one communication after requests to cease +- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse +- Publication of non-harassing private communication + +Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding: + +- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’ +- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you” +- Refusal to explain or debate social justice concepts +- Communicating in a ‘tone’ you don’t find congenial +- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions + + +### Diversity Statement + +We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong. + +Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected +characteristics above, including participants with disabilities. + +### Reporting Issues + +If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include: + +- Your contact information. +- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please +include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link. +- Any additional information that may be helpful. + +After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. 
If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse. + +### Attribution & Acknowledgements + +We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration: + +* [Django](https://www.djangoproject.com/conduct/reporting/) +* [Python](https://www.python.org/community/diversity/) +* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct) +* [Contributor Covenant](http://contributor-covenant.org/) +* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/) +* [Citizen Code of Conduct](http://citizencodeofconduct.org/) + +This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct diff --git a/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md new file mode 100644 index 0000000000..b89a9eb4fb --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md @@ -0,0 +1,30 @@ +# Contributing to NYTimes/gziphandler + +This is an open source project started by handful of developers at The New York Times and open to the entire Go community. + +We really appreciate your help! + +## Filing issues + +When filing an issue, make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +## Contributing code + +Before submitting changes, please follow these guidelines: + +1. Check the open issues and pull requests for existing discussions. +2. Open an issue to discuss a new feature. +3. Write tests. +4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments). +5. Make sure your changes pass `go test`. +6. Make sure the entire test suite passes locally and on Travis CI. +7. Open a Pull Request. +8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). + +Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file. diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE.md b/vendor/github.com/NYTimes/gziphandler/LICENSE.md new file mode 100644 index 0000000000..b7e2ecb63f --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/LICENSE.md @@ -0,0 +1,13 @@ +Copyright (c) 2015 The New York Times Company + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this library except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md new file mode 100644 index 0000000000..6d72460707 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/README.md @@ -0,0 +1,52 @@ +Gzip Handler +============ + +This is a tiny Go package which wraps HTTP handlers to transparently gzip the +response body, for clients which support it. Although it's usually simpler to +leave that to a reverse proxy (like nginx or Varnish), this package is useful +when that's undesirable. + + +## Usage + +Call `GzipHandler` with any handler (an object which implements the +`http.Handler` interface), and it'll return a new handler which gzips the +response. For example: + +```go +package main + +import ( + "io" + "net/http" + "github.com/NYTimes/gziphandler" +) + +func main() { + withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "Hello, World") + }) + + withGz := gziphandler.GzipHandler(withoutGz) + + http.Handle("/", withGz) + http.ListenAndServe("0.0.0.0:8000", nil) +} +``` + + +## Documentation + +The docs can be found at [godoc.org][docs], as usual. + + +## License + +[Apache 2.0][license]. + + + + +[docs]: https://godoc.org/github.com/nytimes/gziphandler +[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go new file mode 100644 index 0000000000..ea6dba1e79 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip.go @@ -0,0 +1,332 @@ +package gziphandler + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" +) + +const ( + vary = "Vary" + acceptEncoding = "Accept-Encoding" + contentEncoding = "Content-Encoding" + contentType = "Content-Type" + contentLength = "Content-Length" +) + +type codings map[string]float64 + +const ( + // DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set. + // This is actually kind of ambiguous in RFC 2616, so hopefully it's correct. + // The examples seem to indicate that it is. + DefaultQValue = 1.0 + + // DefaultMinSize defines the minimum size to reach to enable compression. + // It's 512 bytes. + DefaultMinSize = 512 +) + +// gzipWriterPools stores a sync.Pool for each compression level for reuse of +// gzip.Writers. Use poolIndex to covert a compression level to an index into +// gzipWriterPools. +var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool + +func init() { + for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ { + addLevelPool(i) + } + addLevelPool(gzip.DefaultCompression) +} + +// poolIndex maps a compression level to its index into gzipWriterPools. It +// assumes that level is a valid gzip compression level. +func poolIndex(level int) int { + // gzip.DefaultCompression == -1, so we need to treat it special. + if level == gzip.DefaultCompression { + return gzip.BestCompression - gzip.BestSpeed + 1 + } + return level - gzip.BestSpeed +} + +func addLevelPool(level int) { + gzipWriterPools[poolIndex(level)] = &sync.Pool{ + New: func() interface{} { + // NewWriterLevel only returns error on a bad level, we are guaranteeing + // that this will be a valid level so it is okay to ignore the returned + // error. 
+ w, _ := gzip.NewWriterLevel(nil, level) + return w + }, + } +} + +// GzipResponseWriter provides an http.ResponseWriter interface, which gzips +// bytes before writing them to the underlying response. This doesn't close the +// writers, so don't forget to do that. +// It can be configured to skip response smaller than minSize. +type GzipResponseWriter struct { + http.ResponseWriter + index int // Index for gzipWriterPools. + gw *gzip.Writer + + code int // Saves the WriteHeader value. + + minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed. + buf []byte // Holds the first part of the write before reaching the minSize or the end of the write. +} + +// Write appends data to the gzip writer. +func (w *GzipResponseWriter) Write(b []byte) (int, error) { + // If content type is not set. + if _, ok := w.Header()[contentType]; !ok { + // It infer it from the uncompressed body. + w.Header().Set(contentType, http.DetectContentType(b)) + } + + // GZIP responseWriter is initialized. Use the GZIP responseWriter. + if w.gw != nil { + n, err := w.gw.Write(b) + return n, err + } + + // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter. + // On the first write, w.buf changes from nil to a valid slice + w.buf = append(w.buf, b...) + + // If the global writes are bigger than the minSize, compression is enable. + if len(w.buf) >= w.minSize { + err := w.startGzip() + if err != nil { + return 0, err + } + } + + return len(b), nil +} + +// startGzip initialize any GZIP specific informations. +func (w *GzipResponseWriter) startGzip() error { + + // Set the GZIP header. + w.Header().Set(contentEncoding, "gzip") + + // if the Content-Length is already set, then calls to Write on gzip + // will fail to set the Content-Length header since its already set + // See: https://github.com/golang/go/issues/14975. + w.Header().Del(contentLength) + + // Write the header to gzip response. + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) + } + + // Initialize the GZIP response. + w.init() + + // Flush the buffer into the gzip reponse. + n, err := w.gw.Write(w.buf) + + // This should never happen (per io.Writer docs), but if the write didn't + // accept the entire buffer but returned no specific error, we have no clue + // what's going on, so abort just to be safe. + if err == nil && n < len(w.buf) { + return io.ErrShortWrite + } + + w.buf = nil + return err +} + +// WriteHeader just saves the response code until close or GZIP effective writes. +func (w *GzipResponseWriter) WriteHeader(code int) { + w.code = code +} + +// init graps a new gzip writer from the gzipWriterPool and writes the correct +// content encoding header. +func (w *GzipResponseWriter) init() { + // Bytes written during ServeHTTP are redirected to this gzip writer + // before being written to the underlying response. + gzw := gzipWriterPools[w.index].Get().(*gzip.Writer) + gzw.Reset(w.ResponseWriter) + w.gw = gzw +} + +// Close will close the gzip.Writer and will put it back in the gzipWriterPool. +func (w *GzipResponseWriter) Close() error { + if w.gw == nil { + // Gzip not trigged yet, write out regular response. + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) + } + if w.buf != nil { + _, writeErr := w.ResponseWriter.Write(w.buf) + // Returns the error if any at write. 
+ if writeErr != nil { + return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error()) + } + } + return nil + } + + err := w.gw.Close() + gzipWriterPools[w.index].Put(w.gw) + w.gw = nil + return err +} + +// Flush flushes the underlying *gzip.Writer and then the underlying +// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter +// an http.Flusher. +func (w *GzipResponseWriter) Flush() { + if w.gw != nil { + w.gw.Flush() + } + + if fw, ok := w.ResponseWriter.(http.Flusher); ok { + fw.Flush() + } +} + +// Hijack implements http.Hijacker. If the underlying ResponseWriter is a +// Hijacker, its Hijack method is returned. Otherwise an error is returned. +func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hj, ok := w.ResponseWriter.(http.Hijacker); ok { + return hj.Hijack() + } + return nil, nil, fmt.Errorf("http.Hijacker interface is not supported") +} + +// verify Hijacker interface implementation +var _ http.Hijacker = &GzipResponseWriter{} + +// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in +// an error case it panics rather than returning an error. +func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler { + wrap, err := NewGzipLevelHandler(level) + if err != nil { + panic(err) + } + return wrap +} + +// NewGzipLevelHandler returns a wrapper function (often known as middleware) +// which can be used to wrap an HTTP handler to transparently gzip the response +// body if the client supports it (via the Accept-Encoding header). Responses will +// be encoded at the given gzip compression level. An error will be returned only +// if an invalid gzip compression level is given, so if one can ensure the level +// is valid, the returned error can be safely ignored. +func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) { + return NewGzipLevelAndMinSize(level, DefaultMinSize) +} + +// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller +// specify the minimum size before compression. +func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) { + if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) { + return nil, fmt.Errorf("invalid compression level requested: %d", level) + } + if minSize < 0 { + return nil, fmt.Errorf("minimum size must be more than zero") + } + return func(h http.Handler) http.Handler { + index := poolIndex(level) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(vary, acceptEncoding) + + if acceptsGzip(r) { + gw := &GzipResponseWriter{ + ResponseWriter: w, + index: index, + minSize: minSize, + } + defer gw.Close() + + h.ServeHTTP(gw, r) + } else { + h.ServeHTTP(w, r) + } + }) + }, nil +} + +// GzipHandler wraps an HTTP handler, to transparently gzip the response body if +// the client supports it (via the Accept-Encoding header). This will compress at +// the default compression level. +func GzipHandler(h http.Handler) http.Handler { + wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression) + return wrapper(h) +} + +// acceptsGzip returns true if the given HTTP request indicates that it will +// accept a gzipped response. 
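A minimal usage sketch of the level/min-size constructors defined above (an illustration for context, not part of the vendored file; the port and the 1024-byte threshold are arbitrary). It relies only on `NewGzipLevelAndMinSize` as declared in this gzip.go plus the standard `compress/gzip` level constants; the README's `GzipHandler` example already covers the default path.

```go
package main

import (
	"compress/gzip"
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	// Compress at the fastest level, and only for responses of 1 KiB or more.
	wrap, err := gziphandler.NewGzipLevelAndMinSize(gzip.BestSpeed, 1024)
	if err != nil {
		panic(err) // only possible for an invalid level or a negative min size
	}

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello") // shorter than 1024 bytes, so left uncompressed
	})

	http.ListenAndServe(":8000", wrap(hello))
}
```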
+func acceptsGzip(r *http.Request) bool { + acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding)) + return acceptedEncodings["gzip"] > 0.0 +} + +// parseEncodings attempts to parse a list of codings, per RFC 2616, as might +// appear in an Accept-Encoding header. It returns a map of content-codings to +// quality values, and an error containing the errors encountered. It's probably +// safe to ignore those, because silently ignoring errors is how the internet +// works. +// +// See: http://tools.ietf.org/html/rfc2616#section-14.3. +func parseEncodings(s string) (codings, error) { + c := make(codings) + var e []string + + for _, ss := range strings.Split(s, ",") { + coding, qvalue, err := parseCoding(ss) + + if err != nil { + e = append(e, err.Error()) + } else { + c[coding] = qvalue + } + } + + // TODO (adammck): Use a proper multi-error struct, so the individual errors + // can be extracted if anyone cares. + if len(e) > 0 { + return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", ")) + } + + return c, nil +} + +// parseCoding parses a single conding (content-coding with an optional qvalue), +// as might appear in an Accept-Encoding header. It attempts to forgive minor +// formatting errors. +func parseCoding(s string) (coding string, qvalue float64, err error) { + for n, part := range strings.Split(s, ";") { + part = strings.TrimSpace(part) + qvalue = DefaultQValue + + if n == 0 { + coding = strings.ToLower(part) + } else if strings.HasPrefix(part, "q=") { + qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64) + + if qvalue < 0.0 { + qvalue = 0.0 + } else if qvalue > 1.0 { + qvalue = 1.0 + } + } + } + + if coding == "" { + err = fmt.Errorf("empty content-coding") + } + + return +} diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go new file mode 100644 index 0000000000..fa9665b7e8 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go @@ -0,0 +1,43 @@ +// +build go1.8 + +package gziphandler + +import "net/http" + +// Push initiates an HTTP/2 server push. +// Push returns ErrNotSupported if the client has disabled push or if push +// is not supported on the underlying connection. +func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error { + pusher, ok := w.ResponseWriter.(http.Pusher) + if ok && pusher != nil { + return pusher.Push(target, setAcceptEncodingForPushOptions(opts)) + } + return http.ErrNotSupported +} + +// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers. +func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions { + + if opts == nil { + opts = &http.PushOptions{ + Header: http.Header{ + acceptEncoding: []string{"gzip"}, + }, + } + return opts + } + + if opts.Header == nil { + opts.Header = http.Header{ + acceptEncoding: []string{"gzip"}, + } + return opts + } + + if encoding := opts.Header.Get(acceptEncoding); encoding == "" { + opts.Header.Add(acceptEncoding, "gzip") + return opts + } + + return opts +} diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 0000000000..23a0ada2fb --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 0000000000..76cf4852c7 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. 
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 0000000000..e256b41a5d --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
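A minimal sketch of how the go-semver package above is typically consumed (an illustration added for context, not part of the vendored sources; the version strings are arbitrary). It uses only the exported `New`, `NewVersion`, `LessThan`, `Compare`, and `Sort` shown in semver.go and sort.go.

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	rc := semver.New("1.2.3-rc.1+build.7") // panics on malformed input (wraps NewVersion)
	rel, err := semver.NewVersion("1.2.3")
	if err != nil {
		panic(err)
	}

	// A pre-release sorts below the corresponding release.
	fmt.Println(rc.LessThan(*rel)) // true
	fmt.Println(rc.Compare(*rel))  // -1

	versions := []*semver.Version{rel, rc}
	semver.Sort(versions)
	fmt.Println(versions[0], versions[1]) // 1.2.3-rc.1+build.7 1.2.3
}
```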
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 0000000000..23a0ada2fb --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go new file mode 100644 index 0000000000..ba4ae31f19 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go @@ -0,0 +1,84 @@ +// Copyright 2014 Docker, Inc. +// Copyright 2015-2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package daemon provides a Go implementation of the sd_notify protocol. +// It can be used to inform systemd of service start-up completion, watchdog +// events, and other status changes. +// +// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description +package daemon + +import ( + "net" + "os" +) + +const ( + // SdNotifyReady tells the service manager that service startup is finished + // or the service finished loading its configuration. + SdNotifyReady = "READY=1" + + // SdNotifyStopping tells the service manager that the service is beginning + // its shutdown. + SdNotifyStopping = "STOPPING=1" + + // SdNotifyReloading tells the service manager that this service is + // reloading its configuration. Note that you must call SdNotifyReady when + // it completed reloading. + SdNotifyReloading = "RELOADING=1" + + // SdNotifyWatchdog tells the service manager to update the watchdog + // timestamp for the service. + SdNotifyWatchdog = "WATCHDOG=1" +) + +// SdNotify sends a message to the init daemon. It is common to ignore the error. 
+// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET` +// will be unconditionally unset. +// +// It returns one of the following: +// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) +// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data) +// (true, nil) - notification supported, data has been sent +func SdNotify(unsetEnvironment bool, state string) (bool, error) { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + // NOTIFY_SOCKET not set + if socketAddr.Name == "" { + return false, nil + } + + if unsetEnvironment { + if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil { + return false, err + } + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + // Error connecting to NOTIFY_SOCKET + if err != nil { + return false, err + } + defer conn.Close() + + if _, err = conn.Write([]byte(state)); err != nil { + return false, err + } + return true, nil +} diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go new file mode 100644 index 0000000000..7a0e0d3a51 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go @@ -0,0 +1,73 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package daemon + +import ( + "fmt" + "os" + "strconv" + "time" +) + +// SdWatchdogEnabled returns watchdog information for a service. +// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every +// time / 2. +// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and +// `WATCHDOG_PID` will be unconditionally unset. +// +// It returns one of the following: +// (0, nil) - watchdog isn't enabled or we aren't the watched PID. +// (0, err) - an error happened (e.g. error converting time). +// (time, nil) - watchdog is enabled and we can send ping. +// time is delay before inactive service will be killed. 
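The sd_notify and watchdog helpers are normally combined at service start-up. A minimal sketch of a consumer, assuming an illustrative package outside this vendored file (the SdWatchdogEnabled implementation itself follows below):

    package main

    import (
        "log"
        "time"

        "github.com/coreos/go-systemd/daemon"
    )

    func main() {
        // Report readiness; per the contract above, (false, nil) simply means
        // NOTIFY_SOCKET is unset and we are not running under systemd.
        if ok, err := daemon.SdNotify(false, daemon.SdNotifyReady); err != nil {
            log.Printf("sd_notify failed: %v", err)
        } else if !ok {
            log.Print("NOTIFY_SOCKET unset; not notifying systemd")
        }

        // If a watchdog is configured, ping at half the configured interval,
        // as recommended in the SdWatchdogEnabled documentation.
        if interval, err := daemon.SdWatchdogEnabled(false); err == nil && interval > 0 {
            go func() {
                for range time.Tick(interval / 2) {
                    daemon.SdNotify(false, daemon.SdNotifyWatchdog)
                }
            }()
        }

        select {} // stand-in for the service's real work loop
    }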
+func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) { + wusec := os.Getenv("WATCHDOG_USEC") + wpid := os.Getenv("WATCHDOG_PID") + if unsetEnvironment { + wusecErr := os.Unsetenv("WATCHDOG_USEC") + wpidErr := os.Unsetenv("WATCHDOG_PID") + if wusecErr != nil { + return 0, wusecErr + } + if wpidErr != nil { + return 0, wpidErr + } + } + + if wusec == "" { + return 0, nil + } + s, err := strconv.Atoi(wusec) + if err != nil { + return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err) + } + if s <= 0 { + return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number") + } + interval := time.Duration(s) * time.Microsecond + + if wpid == "" { + return interval, nil + } + p, err := strconv.Atoi(wpid) + if err != nil { + return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err) + } + if os.Getpid() != p { + return 0, nil + } + + return interval, nil +} diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go new file mode 100644 index 0000000000..a0f4837a02 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -0,0 +1,225 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +func init() { + onceConn.Do(initConn) +} + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + if _, err := net.Dial("unixgram", journalSocket); err != nil { + return false + } + + return true +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. 
Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. +// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. 
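Before the remaining plumbing (tempFd and initConn) below, a short usage sketch of Enabled, Send, and Print; the consumer package and the REQUEST_ID field are illustrative assumptions, not part of the vendored code:

    package main

    import (
        "fmt"

        "github.com/coreos/go-systemd/journal"
    )

    func main() {
        if !journal.Enabled() {
            fmt.Println("journald socket unavailable; falling back to stdout")
            return
        }

        // Structured field names must use upper-case letters, digits, and
        // underscores, and must not begin with an underscore, as described above.
        _ = journal.Send("cache rebuild finished", journal.PriInfo, map[string]string{
            "REQUEST_ID": "42",
        })

        // Print is the fmt-style convenience wrapper around Send.
        _ = journal.Print(journal.PriWarning, "slow request took %dms", 950)
    }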
+func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 0000000000..b39ddfa5cb --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md new file mode 100644 index 0000000000..f79dbfca5c --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/README.md @@ -0,0 +1,39 @@ +# capnslog, the CoreOS logging package + +There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). +capnslog provides a simple but consistent logging interface suitable for all kinds of projects. + +### Design Principles + +##### `package main` is the place where logging gets turned on and routed + +A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. + +##### All log options are runtime-configurable. + +Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. + +##### There is one log object per package. It is registered under its repository and package name. + +`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. + +##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. + +Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. + +Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application. + +##### Log objects are an interface + +An object knows best how to print itself. 
Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. + +##### Log levels have specific meanings: + + * Critical: Unrecoverable. Must fail. + * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost + * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. + * Notice: Normal, but important (uncommon) log information. + * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. + * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. + * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. + diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go new file mode 100644 index 0000000000..b305a845fb --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go @@ -0,0 +1,157 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) 
+ endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) + prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. +func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 0000000000..426603ef30 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now().UTC() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + buf := &bytes.Buffer{} + buf.Grow(30) + _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.WriteString(level.Char()) + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) + buf.WriteByte('Z') + buf.WriteByte(' ') + buf.WriteString(strconv.Itoa(pid)) + buf.WriteByte(' ') + buf.WriteString(file) + buf.WriteByte(':') + buf.WriteString(strconv.Itoa(line)) + buf.WriteByte(']') + buf.WriteByte(' ') + return buf.Bytes() +} + +const digits = "0123456789" + +func twoDigits(b *bytes.Buffer, d int) { + c2 := digits[d%10] + d /= 10 + c1 := digits[d%10] + b.WriteByte(c1) + b.WriteByte(c2) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go new file mode 100644 index 0000000000..38ce6d2618 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init.go @@ -0,0 +1,49 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "io" + "os" + "syscall" +) + +// Here's where the opinionation comes in. We need some sensible defaults, +// especially after taking over the log package. Your project (whatever it may +// be) may see things differently. That's okay; there should be no defaults in +// the main package that cannot be controlled or overridden programatically, +// otherwise it's a bug. Doing so is creating your own init_log.go file much +// like this one. + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewDefaultFormatter(os.Stderr)) + SetGlobalLogLevel(INFO) +} + +func NewDefaultFormatter(out io.Writer) Formatter { + if syscall.Getppid() == 1 { + // We're running under init, which may be systemd. 
+ f, err := NewJournaldFormatter() + if err == nil { + return f + } + } + return NewPrettyFormatter(out, false) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go new file mode 100644 index 0000000000..4553050653 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go @@ -0,0 +1,25 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 0000000000..72e05207c5 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,68 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 0000000000..970086b9f9 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 0000000000..226b60c225 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,245 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// Returns an empty string, only here to fulfill the pflag.Value interface. 
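Because Set above (together with String) satisfies flag.Value, a LogLevel can be wired directly into a flag set. A minimal sketch, assuming an illustrative main package and flag name (the Type and ParseLevel definitions continue below):

    package main

    import (
        "flag"
        "os"

        "github.com/coreos/pkg/capnslog"
    )

    func main() {
        level := capnslog.INFO
        // *LogLevel implements flag.Value via its Set and String methods.
        flag.Var(&level, "log-level", "CRITICAL|ERROR|WARNING|NOTICE|INFO|DEBUG|TRACE")
        flag.Parse()

        // main owns logging configuration: pick a formatter and a global level.
        capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
        capnslog.SetGlobalLogLevel(level)
    }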
+func (l *LogLevel) Type() string { + return "" +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. +func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
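The NewPackageLogger implementation follows below; a hedged sketch of the pattern its comment and the README describe, with a made-up repository path, package name, and configuration string:

    package server

    import "github.com/coreos/pkg/capnslog"

    // One logger per package, registered under the repository path.
    var plog = capnslog.NewPackageLogger("github.com/example/project", "server")

    // configureLogging is the kind of hook package main would call, since main
    // owns all logging configuration.
    func configureLogging(conf string) error {
        repo := capnslog.MustRepoLogger("github.com/example/project")

        // conf looks like "*=NOTICE,server=DEBUG".
        levels, err := repo.ParseLogLevelConfig(conf)
        if err != nil {
            return err
        }
        repo.SetLogLevel(levels)

        plog.Infof("log levels configured: %s", conf)
        return nil
    }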
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 0000000000..00ff37149a --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,191 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +// SetLevel allows users to change the current logging level. +func (p *PackageLogger) SetLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + p.level = l +} + +// LevelAt checks if the given log level will be outputted under current setting. +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panicln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) 
+ p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) +} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 0000000000..4be5a1f2de --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 0000000000..0b4659b731 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 0000000000..081c86fa8e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. 
It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. 
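To make the nullable example above concrete, a hedged sketch of calling code for such generated types; the import path and the trimmed-down message (only the two plain fields of A, without the custom Id) are assumptions rather than the real test package. The compatibility options list continues below.

    package main

    import (
        "fmt"

        proto "github.com/gogo/protobuf/proto"
        examplepb "github.com/example/project/examplepb" // hypothetical generated package
    )

    func main() {
        // With nullable=false the fields are plain values, so no proto.String
        // or proto.Int64 wrappers are needed to set them.
        a := &examplepb.A{Description: "demo", Number: 7}

        data, err := proto.Marshal(a)
        if err != nil {
            panic(err)
        }

        var back examplepb.A
        if err := proto.Unmarshal(data, &back); err != nil {
            panic(err)
        }
        fmt.Println(back.Description, back.Number)
    }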
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind are explained in their specific plugin folders' godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extensions, the code that is generated +will be the same as if goprotobuf had generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a separate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 0000000000..1e91766aee --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 0000000000..f6502e4b90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 0000000000..b80c85653f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 0000000000..390d4e4be6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && 
v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 0000000000..3496dc99d5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 0000000000..a85bf1984c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. 
+package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 0000000000..18b2a3318a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
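The nil-tolerant getters above are how consumers are expected to read a parsed file descriptor. As a minimal illustration (not part of the vendored file; listMessages is an assumed name, and fd would typically come from a protoc plugin's CodeGeneratorRequest), a plugin might enumerate a file's top-level messages like this:

package main // illustrative sketch only

import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

// listMessages returns the fully-qualified names of the top-level messages
// declared in a parsed .proto file, using only the accessors defined above.
func listMessages(fd *descriptor.FileDescriptorProto) []string {
	var names []string
	for _, md := range fd.GetMessageType() { // nil-safe: returns a nil slice if unset
		names = append(names, fd.GetPackage()+"."+md.GetName())
	}
	return names
}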
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m 
!= nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
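Combined with the Type and Label enums earlier in this file and the IsScalar helper from descriptor.go, these getters give a code generator enough information to classify a field. A rough sketch under those assumptions (classify is an assumed name, not part of the package):

// classify returns a coarse description of how a field is declared.
func classify(f *descriptor.FieldDescriptorProto) string {
	switch {
	case f.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED:
		return "repeated " + f.GetType().String()
	case f.IsScalar():
		// double, float, the integer types, bool and enum (see IsScalar in descriptor.go)
		return "scalar " + f.GetType().String()
	case f.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		return "message " + f.GetTypeName()
	default:
		return f.GetType().String()
	}
}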
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
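The distinction called out above matters when checking numbers against ranges: a message's reserved range end is exclusive, while an enum's reserved range is inclusive on both ends so that the full int32 domain can be represented. A small sketch of both checks (the helper names are assumed for the example):

// msgNumberReserved reports whether a field number falls in one of a message's
// reserved ranges (DescriptorProto_ReservedRange: end is exclusive).
func msgNumberReserved(md *descriptor.DescriptorProto, n int32) bool {
	for _, r := range md.GetReservedRange() {
		if n >= r.GetStart() && n < r.GetEnd() {
			return true
		}
	}
	return false
}

// enumNumberReserved is the enum counterpart; EnumReservedRange is inclusive
// on both ends, per the comment above.
func enumNumberReserved(ed *descriptor.EnumDescriptorProto, n int32) bool {
	for _, r := range ed.GetReservedRange() {
		if n >= r.GetStart() && n <= r.GetEnd() {
			return true
		}
	}
	return false
}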
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 0000000000..165b2110df --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 0000000000..e0846a357d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/google/uuid/.travis.yml 
b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 0000000000..d8156a60ba --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000000..04fdf09f13 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000000..b4bb97f6bc --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000000..5dc68268d9 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000000..f765a46f91 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. 
One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000000..fa820b9d30 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000000..5b8a4b9af8 --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. 
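[Reviewer note, not part of the patch] A minimal, illustrative sketch of exercising the DCE Security constructors from dce.go above; it assumes the package is imported as github.com/google/uuid and nothing beyond the API shown in this hunk.

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    // Version 2 UUID in the Person domain, built from the calling user's UID.
    id, err := uuid.NewDCEPerson()
    if err != nil {
        panic(err)
    }
    // Domain and ID are only meaningful for Version 2 UUIDs.
    fmt.Println(id, id.Domain(), id.ID())
}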
+package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 0000000000..fc84cd79d4 --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000000..b174616315 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000000..14bd34072b --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
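[Reviewer note, not part of the patch] A short, hedged illustration of the name-based constructors in hash.go above, combined with the text round-trip from marshal.go; the DNS namespace and the hostname "example.org" are illustrative only.

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    // Deterministic Version 5 (SHA-1) UUID derived from a namespace and a name.
    id := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.org"))

    // Round-trip through the encoding.TextMarshaler implementation.
    txt, err := id.MarshalText()
    if err != nil {
        panic(err)
    }
    var back uuid.UUID
    if err := back.UnmarshalText(txt); err != nil {
        panic(err)
    }
    fmt.Println(id == back) // true: UUID is a comparable 16-byte array
}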
+func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000000..d651a2b061 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000000..24b78edc90 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
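[Reviewer note, not part of the patch] node.go above controls the node ID used by Version 1 UUIDs; a small sketch, assuming only the SetNodeID/NodeID/NodeInterface functions shown in this hunk. The 6-byte value is illustrative.

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    // Pin the node ID instead of deriving it from a hardware address.
    if ok := uuid.SetNodeID([]byte{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01}); !ok {
        panic("node id must be at least 6 bytes")
    }
    fmt.Println(uuid.NodeInterface()) // "user" once SetNodeID has been called
    fmt.Printf("% x\n", uuid.NodeID())
}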
+func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000000..0cbbcddbd6 --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000000..f326b54db3 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000000..e6ef06cdc8 --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. 
+type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000000..5ea6c73780 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000000..524404cc52 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
+func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000000..463109629e --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000000..c110465db5 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore new file mode 100644 index 0000000000..2233cff9d1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore @@ -0,0 +1,201 @@ +#vendor +vendor/ + +# Created by .ignore support plugin (hsz.mobi) +coverage.txt +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +### Windows template +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk +### Kate template +# Swap Files # +.*.kate-swp +.swp.* +### SublimeText template +# cache files for sublime text +*.tmlanguage.cache +*.tmPreferences.cache +*.stTheme.cache + +# workspace files are user-specific +*.sublime-workspace + +# project files should be checked into the repository, unless a significant +# proportion of contributors will probably not be using SublimeText +# *.sublime-project + +# sftp configuration file +sftp-config.json +### Linux template +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin 
+.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties +### Xcode template +# Xcode +# +# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore + +## Build generated +build/ +DerivedData/ + +## Various settings +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +xcuserdata/ + +## Other +*.moved-aside +*.xccheckout +*.xcscmblueprint +### Eclipse template + +.metadata +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath +.recommenders + +# Eclipse Core +.project + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# PyDev specific (Python IDE for Eclipse) +*.pydevproject + +# CDT-specific (C/C++ Development Tooling) +.cproject + +# JDT-specific (Eclipse Java Development Tools) +.classpath + +# Java annotation processor (APT) +.factorypath + +# PDT-specific (PHP Development Tools) +.buildpath + +# sbteclipse plugin +.target + +# Tern plugin +.tern-project + +# TeXlipse plugin +.texlipse + +# STS (Spring Tool Suite) +.springBeans + +# Code Recommenders +.recommenders/ + diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml new file mode 100644 index 0000000000..2a845b96ae --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml @@ -0,0 +1,25 @@ +sudo: false +language: go +# * github.com/grpc/grpc-go still supports go1.6 +# - When we drop support for go1.6 we can remove golang.org/x/net/context +# below as it is part of the Go std library since go1.7 +# * github.com/prometheus/client_golang already requires at least go1.7 since +# September 2017 +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - master + +install: + - go get github.com/prometheus/client_golang/prometheus + - go get google.golang.org/grpc + - go get golang.org/x/net/context + - go get github.com/stretchr/testify +script: + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md new file mode 100644 index 0000000000..19a8059e1b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04 + +### Added + +* Provide metrics object as `prometheus.Collector`, for conventional metric registration. +* Support non-default/global Prometheus registry. +* Allow configuring counters with `prometheus.CounterOpts`. + +### Changed + +* Remove usage of deprecated `grpc.Code()`. +* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`. + +--- + +This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases). 
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE new file mode 100644 index 0000000000..b2b065037f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md new file mode 100644 index 0000000000..499c583553 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md @@ -0,0 +1,247 @@ +# Go gRPC Interceptors for Prometheus monitoring + +[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus) +[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus) +[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus) +[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge) +[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus) +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) + +[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients. + +A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus). + +## Interceptors + +[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed +by a gRPC Server before the request is passed onto the user's application logic. It is a perfect way to implement +common patterns: auth, logging and... monitoring. + +To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware). + +## Usage + +There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both. + +### Server-side + +```go +import "github.com/grpc-ecosystem/go-grpc-prometheus" +... + // Initialize your gRPC server's interceptor. + myServer := grpc.NewServer( + grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), + grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), + ) + // Register your gRPC service implementations. + myservice.RegisterMyServiceServer(s.server, &myServiceImpl{}) + // After all your registrations, make sure all of the Prometheus metrics are initialized. + grpc_prometheus.Register(myServer) + // Register Prometheus metrics handler. + http.Handle("/metrics", promhttp.Handler()) +... +``` + +### Client-side + +```go +import "github.com/grpc-ecosystem/go-grpc-prometheus" +... 
+ clientConn, err = grpc.Dial( + address, + grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor) + ) + client = pb_testproto.NewTestServiceClient(clientConn) + resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"}) +... +``` + +# Metrics + +## Labels + +All server-side metrics start with `grpc_server` as Prometheus subsystem name. All client-side metrics start with `grpc_client`. Both of them have mirror-concepts. Similarly all methods +contain the same rich labels: + + * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and + the `grpc_service` section name. E.g. for `package = mwitkow.testproto` and + `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"` + * `grpc_method` - the name of the method called on the gRPC service. E.g. + `grpc_method="Ping"` + * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle). + Differentiating between the two is important especially for latency measurements. + + - `unary` is single request, single response RPC + - `client_stream` is a multi-request, single response RPC + - `server_stream` is a single request, multi-response RPC + - `bidi_stream` is a multi-request, multi-response RPC + + +Additionally for completed RPCs, the following labels are used: + + * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go). + The list of all statuses is to long, but here are some common ones: + + - `OK` - means the RPC was successful + - `IllegalArgument` - RPC contained bad values + - `Internal` - server-side error not disclosed to the clients + +## Counters + +The counters and their up to date documentation is in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go) +the respective Prometheus handler (usually `/metrics`). + +For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones contain mirror concepts. + +For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto), +calling the method `PingList`. The call succeeds and returns 20 messages in the stream. + +First, immediately after the server receives the call it will increment the +`grpc_server_started_total` and start the handling time clock (if histograms are enabled). + +```jsoniq +grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 +``` + +Then the user logic gets invoked. It receives one message from the client containing the request +(it's a `server_stream`): + +```jsoniq +grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 +``` + +The user logic may return an error, or send multiple messages back to the client. In this case, on +each of the 20 messages sent back, a counter will be incremented: + +```jsoniq +grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20 +``` + +After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go)) +and the relevant call labels increment the `grpc_server_handled_total` counter. 
+ +```jsoniq +grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 +``` + +## Histograms + +[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way +to measure latency distributions of your RPCs. However, since it is bad practice to have metrics +of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels) +the latency monitoring metrics are disabled by default. To enable them please call the following +in your server initialization code: + +```jsoniq +grpc_prometheus.EnableHandlingTimeHistogram() +``` + +After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) +variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics: + + * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method + * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for + calculating average handling times + * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective + handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/)) + +The counter values will look as follows: + +```jsoniq +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1 +grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1 +grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001 
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 +``` + + +## Useful query examples + +Prometheus philosophy is to provide raw metrics to the monitoring system, and +let the aggregations be handled there. The verbosity of above metrics make it possible to have that +flexibility. Here's a couple of useful monitoring queries: + + +### request inbound rate +```jsoniq +sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service) +``` +For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the +rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note +how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together. + +### unary request error rate +```jsoniq +sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service) +``` +For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the +ones that didn't finish with `OK` code. + +### unary request error percentage +```jsoniq +sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service) + / +sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service) + * 100.0 +``` +For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that +this is a combination of the two above examples. This is an example of a query you would like to +[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g. +"no more than 1% requests should fail". + +### average response stream size +```jsoniq +sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service) + / +sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service) +``` +For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all ` +server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows +you to track when clients started to send "wide" queries that ret +Note the divisor is the number of started RPCs, in order to account for in-flight requests. + +### 99%-tile latency of unary requests +```jsoniq +histogram_quantile(0.99, + sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le) +) +``` +For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles) +of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile +estimation will take samples in a rolling `5m` window. When combined with other quantiles +(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system +(e.g. impact of caching). + +### percentage of slow unary queries (>250ms) +```jsoniq +100.0 - ( +sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service) + / +sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service) +) * 100.0 +``` +For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25` +seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal) +buckets, meaning that counting "fast" requests fractions is easier. 
However, simple maths helps. +This is an example of a query you would like to alert on in your system for SLA violations, +e.g. "less than 1% of requests are slower than 250ms". + + +## Status + +This code has been used since August 2015 as the basis for monitoring of *production* gRPC micro services at [Improbable](https://improbable.io). + +## License + +`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go new file mode 100644 index 0000000000..751a4c72d7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go @@ -0,0 +1,39 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Prometheus monitoring interceptors for client-side gRPC. + +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" +) + +var ( + // DefaultClientMetrics is the default instance of ClientMetrics. It is + // intended to be used in conjunction the default Prometheus metrics + // registry. + DefaultClientMetrics = NewClientMetrics() + + // UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. + UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor() + + // StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. + StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor() +) + +func init() { + prom.MustRegister(DefaultClientMetrics.clientStartedCounter) + prom.MustRegister(DefaultClientMetrics.clientHandledCounter) + prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived) + prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent) +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of +// RPCs. Histogram metrics can be very expensive for Prometheus to retain and +// query. This function acts on the DefaultClientMetrics variable and the +// default Prometheus metrics registry. +func EnableClientHandlingTimeHistogram(opts ...HistogramOption) { + DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...) + prom.Register(DefaultClientMetrics.clientHandledHistogram) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go new file mode 100644 index 0000000000..9b476f9832 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go @@ -0,0 +1,170 @@ +package grpc_prometheus + +import ( + "io" + + prom "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ClientMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC client. +type ClientMetrics struct { + clientStartedCounter *prom.CounterVec + clientHandledCounter *prom.CounterVec + clientStreamMsgReceived *prom.CounterVec + clientStreamMsgSent *prom.CounterVec + clientHandledHistogramEnabled bool + clientHandledHistogramOpts prom.HistogramOpts + clientHandledHistogram *prom.HistogramVec +} + +// NewClientMetrics returns a ClientMetrics object. 
Use a new instance of +// ClientMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. +func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics { + opts := counterOptions(counterOpts) + return &ClientMetrics{ + clientStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_started_total", + Help: "Total number of RPCs started on the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + + clientStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_received_total", + Help: "Total number of RPC stream messages received by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledHistogramEnabled: false, + clientHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_client_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", + Buckets: prom.DefBuckets, + }, + clientHandledHistogram: nil, + } +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) { + m.clientStartedCounter.Describe(ch) + m.clientHandledCounter.Describe(ch) + m.clientStreamMsgReceived.Describe(ch) + m.clientStreamMsgSent.Describe(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ClientMetrics) Collect(ch chan<- prom.Metric) { + m.clientStartedCounter.Collect(ch) + m.clientHandledCounter.Collect(ch) + m.clientStreamMsgReceived.Collect(ch) + m.clientStreamMsgSent.Collect(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Collect(ch) + } +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.clientHandledHistogramOpts) + } + if !m.clientHandledHistogramEnabled { + m.clientHandledHistogram = prom.NewHistogramVec( + m.clientHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.clientHandledHistogramEnabled = true +} + +// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + monitor := newClientReporter(m, Unary, method) + monitor.SentMessage() + err := invoker(ctx, method, req, reply, cc, opts...) + if err != nil { + monitor.ReceivedMessage() + } + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return err + } +} + +// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. +func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + monitor := newClientReporter(m, clientStreamType(desc), method) + clientStream, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return nil, err + } + return &monitoredClientStream{clientStream, monitor}, nil + } +} + +func clientStreamType(desc *grpc.StreamDesc) grpcType { + if desc.ClientStreams && !desc.ServerStreams { + return ClientStream + } else if !desc.ClientStreams && desc.ServerStreams { + return ServerStream + } + return BidiStream +} + +// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters. +type monitoredClientStream struct { + grpc.ClientStream + monitor *clientReporter +} + +func (s *monitoredClientStream) SendMsg(m interface{}) error { + err := s.ClientStream.SendMsg(m) + if err == nil { + s.monitor.SentMessage() + } + return err +} + +func (s *monitoredClientStream) RecvMsg(m interface{}) error { + err := s.ClientStream.RecvMsg(m) + if err == nil { + s.monitor.ReceivedMessage() + } else if err == io.EOF { + s.monitor.Handled(codes.OK) + } else { + st, _ := status.FromError(err) + s.monitor.Handled(st.Code()) + } + return err +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go new file mode 100644 index 0000000000..cbf1532299 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go @@ -0,0 +1,46 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type clientReporter struct { + metrics *ClientMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter { + r := &clientReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.clientHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *clientReporter) ReceivedMessage() { + r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) SentMessage() { + r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) Handled(code codes.Code) { + r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.clientHandledHistogramEnabled { + r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile new file mode 100644 index 0000000000..74c0842230 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile @@ -0,0 +1,16 @@ +SHELL="/bin/bash" + +GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/) + +all: vet fmt test + +fmt: + go fmt $(GOFILES_NOVENDOR) + +vet: + go vet $(GOFILES_NOVENDOR) + +test: vet + ./scripts/test_all.sh + +.PHONY: all vet test diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go new file mode 100644 index 0000000000..9d51aec981 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go @@ -0,0 +1,41 @@ +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" +) + +// A CounterOption lets you add options to Counter metrics using With* funcs. +type CounterOption func(*prom.CounterOpts) + +type counterOptions []CounterOption + +func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts { + for _, f := range co { + f(&o) + } + return o +} + +// WithConstLabels allows you to add ConstLabels to Counter metrics. +func WithConstLabels(labels prom.Labels) CounterOption { + return func(o *prom.CounterOpts) { + o.ConstLabels = labels + } +} + +// A HistogramOption lets you add options to Histogram metrics using With* +// funcs. +type HistogramOption func(*prom.HistogramOpts) + +// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on. +func WithHistogramBuckets(buckets []float64) HistogramOption { + return func(o *prom.HistogramOpts) { o.Buckets = buckets } +} + +// WithHistogramConstLabels allows you to add custom ConstLabels to +// histograms metrics. 
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption { + return func(o *prom.HistogramOpts) { + o.ConstLabels = labels + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go new file mode 100644 index 0000000000..322f99046f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go @@ -0,0 +1,48 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Prometheus monitoring interceptors for server-side gRPC. + +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" +) + +var ( + // DefaultServerMetrics is the default instance of ServerMetrics. It is + // intended to be used in conjunction the default Prometheus metrics + // registry. + DefaultServerMetrics = NewServerMetrics() + + // UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. + UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor() + + // StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. + StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor() +) + +func init() { + prom.MustRegister(DefaultServerMetrics.serverStartedCounter) + prom.MustRegister(DefaultServerMetrics.serverHandledCounter) + prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived) + prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent) +} + +// Register takes a gRPC server and pre-initializes all counters to 0. This +// allows for easier monitoring in Prometheus (no missing metrics), and should +// be called *after* all services have been registered with the server. This +// function acts on the DefaultServerMetrics variable. +func Register(server *grpc.Server) { + DefaultServerMetrics.InitializeMetrics(server) +} + +// EnableHandlingTimeHistogram turns on recording of handling time +// of RPCs. Histogram metrics can be very expensive for Prometheus +// to retain and query. This function acts on the DefaultServerMetrics +// variable and the default Prometheus metrics registry. +func EnableHandlingTimeHistogram(opts ...HistogramOption) { + DefaultServerMetrics.EnableHandlingTimeHistogram(opts...) + prom.Register(DefaultServerMetrics.serverHandledHistogram) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go new file mode 100644 index 0000000000..5b1467e7aa --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go @@ -0,0 +1,185 @@ +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/status" +) + +// ServerMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC server. +type ServerMetrics struct { + serverStartedCounter *prom.CounterVec + serverHandledCounter *prom.CounterVec + serverStreamMsgReceived *prom.CounterVec + serverStreamMsgSent *prom.CounterVec + serverHandledHistogramEnabled bool + serverHandledHistogramOpts prom.HistogramOpts + serverHandledHistogram *prom.HistogramVec +} + +// NewServerMetrics returns a ServerMetrics object. 
Use a new instance of +// ServerMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. +func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics { + opts := counterOptions(counterOpts) + return &ServerMetrics{ + serverStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_started_total", + Help: "Total number of RPCs started on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_handled_total", + Help: "Total number of RPCs completed on the server, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + serverStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_received_total", + Help: "Total number of RPC stream messages received on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledHistogramEnabled: false, + serverHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_server_handling_seconds", + Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.", + Buckets: prom.DefBuckets, + }, + serverHandledHistogram: nil, + } +} + +// EnableHandlingTimeHistogram enables histograms being registered when +// registering the ServerMetrics on a Prometheus registry. Histograms can be +// expensive on Prometheus servers. It takes options to configure histogram +// options such as the defined buckets. +func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.serverHandledHistogramOpts) + } + if !m.serverHandledHistogramEnabled { + m.serverHandledHistogram = prom.NewHistogramVec( + m.serverHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.serverHandledHistogramEnabled = true +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) { + m.serverStartedCounter.Describe(ch) + m.serverHandledCounter.Describe(ch) + m.serverStreamMsgReceived.Describe(ch) + m.serverStreamMsgSent.Describe(ch) + if m.serverHandledHistogramEnabled { + m.serverHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ServerMetrics) Collect(ch chan<- prom.Metric) { + m.serverStartedCounter.Collect(ch) + m.serverHandledCounter.Collect(ch) + m.serverStreamMsgReceived.Collect(ch) + m.serverStreamMsgSent.Collect(ch) + if m.serverHandledHistogramEnabled { + m.serverHandledHistogram.Collect(ch) + } +} + +// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. 
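To show how the server-side pieces defined in this file fit together, here is a hedged sketch (not from the vendored sources; the registry, service registration, and package name are assumed) combining ServerMetrics, its interceptors, and InitializeMetrics:

    package main

    import (
        grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
        "github.com/prometheus/client_golang/prometheus"
        "google.golang.org/grpc"
    )

    func main() {
        serverMetrics := grpc_prometheus.NewServerMetrics()
        // Opt in to the handling-time histogram; it is off by default because
        // histograms are comparatively expensive to retain and query.
        serverMetrics.EnableHandlingTimeHistogram()

        server := grpc.NewServer(
            grpc.UnaryInterceptor(serverMetrics.UnaryServerInterceptor()),
            grpc.StreamInterceptor(serverMetrics.StreamServerInterceptor()),
        )
        // ... register gRPC service implementations on server here ...

        // Pre-populate all per-method series with zero values once every
        // service has been registered, so no series is missing before first use.
        serverMetrics.InitializeMetrics(server)

        reg := prometheus.NewRegistry()
        reg.MustRegister(serverMetrics)
        _ = reg // in a real program, expose reg, e.g. via an HTTP handler
    }
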
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + monitor := newServerReporter(m, Unary, info.FullMethod) + monitor.ReceivedMessage() + resp, err := handler(ctx, req) + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + if err == nil { + monitor.SentMessage() + } + return resp, err + } +} + +// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. +func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + monitor := newServerReporter(m, streamRPCType(info), info.FullMethod) + err := handler(srv, &monitoredServerStream{ss, monitor}) + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return err + } +} + +// InitializeMetrics initializes all metrics, with their appropriate null +// value, for all gRPC methods registered on a gRPC server. This is useful, to +// ensure that all metrics exist when collecting and querying. +func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) { + serviceInfo := server.GetServiceInfo() + for serviceName, info := range serviceInfo { + for _, mInfo := range info.Methods { + preRegisterMethod(m, serviceName, &mInfo) + } + } +} + +func streamRPCType(info *grpc.StreamServerInfo) grpcType { + if info.IsClientStream && !info.IsServerStream { + return ClientStream + } else if !info.IsClientStream && info.IsServerStream { + return ServerStream + } + return BidiStream +} + +// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters. +type monitoredServerStream struct { + grpc.ServerStream + monitor *serverReporter +} + +func (s *monitoredServerStream) SendMsg(m interface{}) error { + err := s.ServerStream.SendMsg(m) + if err == nil { + s.monitor.SentMessage() + } + return err +} + +func (s *monitoredServerStream) RecvMsg(m interface{}) error { + err := s.ServerStream.RecvMsg(m) + if err == nil { + s.monitor.ReceivedMessage() + } + return err +} + +// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated. +func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) { + methodName := mInfo.Name + methodType := string(typeFromMethodInfo(mInfo)) + // These are just references (no increments), as just referencing will create the labels but not set values. 
+ metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName) + if metrics.serverHandledHistogramEnabled { + metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName) + } + for _, code := range allCodes { + metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go new file mode 100644 index 0000000000..aa9db5401a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go @@ -0,0 +1,46 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type serverReporter struct { + metrics *ServerMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter { + r := &serverReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.serverHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *serverReporter) ReceivedMessage() { + r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) SentMessage() { + r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) Handled(code codes.Code) { + r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.serverHandledHistogramEnabled { + r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go new file mode 100644 index 0000000000..7987de35f4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go @@ -0,0 +1,50 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package grpc_prometheus + +import ( + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type grpcType string + +const ( + Unary grpcType = "unary" + ClientStream grpcType = "client_stream" + ServerStream grpcType = "server_stream" + BidiStream grpcType = "bidi_stream" +) + +var ( + allCodes = []codes.Code{ + codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, + codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, + codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, + codes.Unavailable, codes.DataLoss, + } +) + +func splitMethodName(fullMethodName string) (string, string) { + fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash + if i := strings.Index(fullMethodName, "/"); i >= 0 { + return fullMethodName[:i], fullMethodName[i+1:] + } + return "unknown", "unknown" +} + +func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType { + if !mInfo.IsClientStream && !mInfo.IsServerStream { + return Unary + } + if mInfo.IsClientStream && !mInfo.IsServerStream { + return ClientStream + } + if !mInfo.IsClientStream && mInfo.IsServerStream { + return ServerStream + } + return BidiStream +} diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE new file mode 100644 index 0000000000..bbc7b897cb --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile new file mode 100644 index 0000000000..e33ee17303 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt new file mode 100644 index 0000000000..7723656d58 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go new file mode 100644 index 0000000000..1dd1cad646 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -0,0 +1,189 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// acceptSlice is defined to implement sort interface. +type acceptSlice []Accept + +func (slice acceptSlice) Len() int { + return len(slice) +} + +func (slice acceptSlice) Less(i, j int) bool { + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (slice acceptSlice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) + + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) + + a := Accept{ + Q: 1.0, + } + + sp, remainingPart := nextSplitElement(part, ";") + + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) + + switch { + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } + default: + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) + } + + if len(remainingPart) == 0 { + accept = append(accept, a) + continue + } + + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + 
sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { + continue + } + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp1, 32) + } else { + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) + } + } + + accept = append(accept, a) + } + + sort.Sort(accept) + return accept +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go new file mode 100644 index 0000000000..7681877a89 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go @@ -0,0 +1,46 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil/promlint" +) + +// CollectAndLint registers the provided Collector with a newly created pedantic +// Registry. It then calls GatherAndLint with that Registry and with the +// provided metricNames. +func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %s", err) + } + return GatherAndLint(reg, metricNames...) +} + +// GatherAndLint gathers all metrics from the provided Gatherer and checks them +// with the linter in the promlint package. If any metricNames are provided, +// only metrics with those names are checked. 
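As an illustrative sketch only (the package name and the metric below are invented for the example, not taken from this repository), CollectAndLint might be exercised from a test roughly like this:

    package queue_test

    import (
        "testing"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/testutil"
    )

    func TestCounterNaming(t *testing.T) {
        c := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "queue_items", // missing the "_total" suffix, so the linter should complain
            Help: "Number of items added to the queue.",
        })
        problems, err := testutil.CollectAndLint(c)
        if err != nil {
            t.Fatal(err)
        }
        for _, p := range problems {
            t.Logf("lint problem on %s: %s", p.Metric, p.Text)
        }
    }
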
+func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) { + got, err := g.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %s", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + return promlint.NewWithMetricFamilies(got).Lint() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go new file mode 100644 index 0000000000..ec80617062 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -0,0 +1,386 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promlint provides a linter for Prometheus metrics. +package promlint + +import ( + "fmt" + "io" + "regexp" + "sort" + "strings" + + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +// A Linter is a Prometheus metrics linter. It identifies issues with metric +// names, types, and metadata, and reports them to the caller. +type Linter struct { + // The linter will read metrics in the Prometheus text format from r and + // then lint it, _and_ it will lint the metrics provided directly as + // MetricFamily proto messages in mfs. Note, however, that the current + // constructor functions New and NewWithMetricFamilies only ever set one + // of them. + r io.Reader + mfs []*dto.MetricFamily +} + +// A Problem is an issue detected by a Linter. +type Problem struct { + // The name of the metric indicated by this Problem. + Metric string + + // A description of the issue for this Problem. + Text string +} + +// newProblem is helper function to create a Problem. +func newProblem(mf *dto.MetricFamily, text string) Problem { + return Problem{ + Metric: mf.GetName(), + Text: text, + } +} + +// New creates a new Linter that reads an input stream of Prometheus metrics in +// the Prometheus text exposition format. +func New(r io.Reader) *Linter { + return &Linter{ + r: r, + } +} + +// NewWithMetricFamilies creates a new Linter that reads from a slice of +// MetricFamily protobuf messages. +func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter { + return &Linter{ + mfs: mfs, + } +} + +// Lint performs a linting pass, returning a slice of Problems indicating any +// issues found in the metrics stream. The slice is sorted by metric name +// and issue description. +func (l *Linter) Lint() ([]Problem, error) { + var problems []Problem + + if l.r != nil { + d := expfmt.NewDecoder(l.r, expfmt.FmtText) + + mf := &dto.MetricFamily{} + for { + if err := d.Decode(mf); err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + problems = append(problems, lint(mf)...) + } + } + for _, mf := range l.mfs { + problems = append(problems, lint(mf)...) + } + + // Ensure deterministic output. 
+ sort.SliceStable(problems, func(i, j int) bool { + if problems[i].Metric == problems[j].Metric { + return problems[i].Text < problems[j].Text + } + return problems[i].Metric < problems[j].Metric + }) + + return problems, nil +} + +// lint is the entry point for linting a single metric. +func lint(mf *dto.MetricFamily) []Problem { + fns := []func(mf *dto.MetricFamily) []Problem{ + lintHelp, + lintMetricUnits, + lintCounter, + lintHistogramSummaryReserved, + lintMetricTypeInName, + lintReservedChars, + lintCamelCase, + lintUnitAbbreviations, + } + + var problems []Problem + for _, fn := range fns { + problems = append(problems, fn(mf)...) + } + + // TODO(mdlayher): lint rules for specific metrics types. + return problems +} + +// lintHelp detects issues related to the help text for a metric. +func lintHelp(mf *dto.MetricFamily) []Problem { + var problems []Problem + + // Expect all metrics to have help text available. + if mf.Help == nil { + problems = append(problems, newProblem(mf, "no help text")) + } + + return problems +} + +// lintMetricUnits detects issues with metric unit names. +func lintMetricUnits(mf *dto.MetricFamily) []Problem { + var problems []Problem + + unit, base, ok := metricUnits(*mf.Name) + if !ok { + // No known units detected. + return nil + } + + // Unit is already a base unit. + if unit == base { + return nil + } + + problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))) + + return problems +} + +// lintCounter detects issues specific to counters, as well as patterns that should +// only be used with counters. +func lintCounter(mf *dto.MetricFamily) []Problem { + var problems []Problem + + isCounter := mf.GetType() == dto.MetricType_COUNTER + isUntyped := mf.GetType() == dto.MetricType_UNTYPED + hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") + + switch { + case isCounter && !hasTotalSuffix: + problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`)) + case !isUntyped && !isCounter && hasTotalSuffix: + problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`)) + } + + return problems +} + +// lintHistogramSummaryReserved detects when other types of metrics use names or labels +// reserved for use by histograms and/or summaries. +func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem { + // These rules do not apply to untyped metrics. 
+ t := mf.GetType() + if t == dto.MetricType_UNTYPED { + return nil + } + + var problems []Problem + + isHistogram := t == dto.MetricType_HISTOGRAM + isSummary := t == dto.MetricType_SUMMARY + + n := mf.GetName() + + if !isHistogram && strings.HasSuffix(n, "_bucket") { + problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { + problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { + problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + ln := l.GetName() + + if !isHistogram && ln == "le" { + problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`)) + } + if !isSummary && ln == "quantile" { + problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`)) + } + } + } + + return problems +} + +// lintMetricTypeInName detects when metric types are included in the metric name. +func lintMetricTypeInName(mf *dto.MetricFamily) []Problem { + var problems []Problem + n := strings.ToLower(mf.GetName()) + + for i, t := range dto.MetricType_name { + if i == int32(dto.MetricType_UNTYPED) { + continue + } + + typename := strings.ToLower(t) + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename))) + } + } + return problems +} + +// lintReservedChars detects colons in metric names. +func lintReservedChars(mf *dto.MetricFamily) []Problem { + var problems []Problem + if strings.Contains(mf.GetName(), ":") { + problems = append(problems, newProblem(mf, "metric names should not contain ':'")) + } + return problems +} + +var camelCase = regexp.MustCompile(`[a-z][A-Z]`) + +// lintCamelCase detects metric names and label names written in camelCase. +func lintCamelCase(mf *dto.MetricFamily) []Problem { + var problems []Problem + if camelCase.FindString(mf.GetName()) != "" { + problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'")) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if camelCase.FindString(l.GetName()) != "" { + problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'")) + } + } + } + return problems +} + +// lintUnitAbbreviations detects abbreviated units in the metric name. +func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { + var problems []Problem + n := strings.ToLower(mf.GetName()) + for _, s := range unitAbbreviations { + if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { + problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units")) + } + } + return problems +} + +// metricUnits attempts to detect known unit types used as part of a metric name, +// e.g. "foo_bytes_total" or "bar_baz_milligrams". +func metricUnits(m string) (unit string, base string, ok bool) { + ss := strings.Split(m, "_") + + for unit, base := range units { + // Also check for "no prefix". 
+ for _, p := range append(unitPrefixes, "") { + for _, s := range ss { + // Attempt to explicitly match a known unit with a known prefix, + // as some words may look like "units" when matching suffix. + // + // As an example, "thermometers" should not match "meters", but + // "kilometers" should. + if s == p+unit { + return p + unit, base, true + } + } + } + } + + return "", "", false +} + +// Units and their possible prefixes recognized by this library. More can be +// added over time as needed. +var ( + // map a unit to the appropriate base unit. + units = map[string]string{ + // Base units. + "amperes": "amperes", + "bytes": "bytes", + "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. + "grams": "grams", + "joules": "joules", + "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). + "meters": "meters", // Both American and international spelling permitted. + "metres": "metres", + "seconds": "seconds", + "volts": "volts", + + // Non base units. + // Time. + "minutes": "seconds", + "hours": "seconds", + "days": "seconds", + "weeks": "seconds", + // Temperature. + "kelvins": "kelvin", + "fahrenheit": "celsius", + "rankine": "celsius", + // Length. + "inches": "meters", + "yards": "meters", + "miles": "meters", + // Bytes. + "bits": "bytes", + // Energy. + "calories": "joules", + // Mass. + "pounds": "grams", + "ounces": "grams", + } + + unitPrefixes = []string{ + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "kibi", + "mega", + "mibi", + "giga", + "gibi", + "tera", + "tebi", + "peta", + "pebi", + } + + // Common abbreviations that we'd like to discourage. + unitAbbreviations = []string{ + "s", + "ms", + "us", + "ns", + "sec", + "b", + "kb", + "mb", + "gb", + "tb", + "pb", + "m", + "h", + "d", + } +) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go new file mode 100644 index 0000000000..9af60ce1d2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -0,0 +1,230 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil provides helpers to test code using the prometheus package +// of client_golang. +// +// While writing unit tests to verify correct instrumentation of your code, it's +// a common mistake to mostly test the instrumentation library instead of your +// own code. Rather than verifying that a prometheus.Counter's value has changed +// as expected or that it shows up in the exposition after registration, it is +// in general more robust and more faithful to the concept of unit tests to use +// mock implementations of the prometheus.Counter and prometheus.Registerer +// interfaces that simply assert that the Add or Register methods have been +// called with the expected arguments. 
However, this might be overkill in simple +// scenarios. The ToFloat64 function is provided for simple inspection of a +// single-value metric, but it has to be used with caution. +// +// End-to-end tests to verify all or larger parts of the metrics exposition can +// be implemented with the CollectAndCompare or GatherAndCompare functions. The +// most appropriate use is not so much testing instrumentation of your code, but +// testing custom prometheus.Collector implementations and in particular whole +// exporters, i.e. programs that retrieve telemetry data from a 3rd party source +// and convert it into Prometheus metrics. +// +// In a similar pattern, CollectAndLint and GatherAndLint can be used to detect +// metrics that have issues with their name, type, or metadata without being +// necessarily invalid, e.g. a counter with a name missing the “_total” suffix. +package testutil + +import ( + "bytes" + "fmt" + "io" + + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/internal" +) + +// ToFloat64 collects all Metrics from the provided Collector. It expects that +// this results in exactly one Metric being collected, which must be a Gauge, +// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns +// the value of the collected Metric. +// +// The Collector provided is typically a simple instance of Gauge or Counter, or +// – less commonly – a GaugeVec or CounterVec with exactly one element. But any +// Collector fulfilling the prerequisites described above will do. +// +// Use this function with caution. It is computationally very expensive and thus +// not suited at all to read values from Metrics in regular code. This is really +// only for testing purposes, and even for testing, other approaches are often +// more appropriate (see this package's documentation). +// +// A clear anti-pattern would be to use a metric type from the prometheus +// package to track values that are also needed for something else than the +// exposition of Prometheus metrics. For example, you would like to track the +// number of items in a queue because your code should reject queuing further +// items if a certain limit is reached. It is tempting to track the number of +// items in a prometheus.Gauge, as it is then easily available as a metric for +// exposition, too. However, then you would need to call ToFloat64 in your +// regular code, potentially quite often. The recommended way is to track the +// number of items conventionally (in the way you would have done it without +// considering Prometheus metrics) and then expose the number with a +// prometheus.GaugeFunc. +func ToFloat64(c prometheus.Collector) float64 { + var ( + m prometheus.Metric + mCount int + mChan = make(chan prometheus.Metric) + done = make(chan struct{}) + ) + + go func() { + for m = range mChan { + mCount++ + } + close(done) + }() + + c.Collect(mChan) + close(mChan) + <-done + + if mCount != 1 { + panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount)) + } + + pb := &dto.Metric{} + m.Write(pb) + if pb.Gauge != nil { + return pb.Gauge.GetValue() + } + if pb.Counter != nil { + return pb.Counter.GetValue() + } + if pb.Untyped != nil { + return pb.Untyped.GetValue() + } + panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb)) +} + +// CollectAndCount registers the provided Collector with a newly created +// pedantic Registry. 
It then calls GatherAndCount with that Registry and with +// the provided metricNames. In the unlikely case that the registration or the +// gathering fails, this function panics. (This is inconsistent with the other +// CollectAnd… functions in this package and has historical reasons. Changing +// the function signature would be a breaking change and will therefore only +// happen with the next major version bump.) +func CollectAndCount(c prometheus.Collector, metricNames ...string) int { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + panic(fmt.Errorf("registering collector failed: %s", err)) + } + result, err := GatherAndCount(reg, metricNames...) + if err != nil { + panic(err) + } + return result +} + +// GatherAndCount gathers all metrics from the provided Gatherer and counts +// them. It returns the number of metric children in all gathered metric +// families together. If any metricNames are provided, only metrics with those +// names are counted. +func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { + got, err := g.Gather() + if err != nil { + return 0, fmt.Errorf("gathering metrics failed: %s", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + + result := 0 + for _, mf := range got { + result += len(mf.GetMetric()) + } + return result, nil +} + +// CollectAndCompare registers the provided Collector with a newly created +// pedantic Registry. It then calls GatherAndCompare with that Registry and with +// the provided metricNames. +func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return fmt.Errorf("registering collector failed: %s", err) + } + return GatherAndCompare(reg, expected, metricNames...) +} + +// GatherAndCompare gathers all metrics from the provided Gatherer and compares +// it to an expected output read from the provided Reader in the Prometheus text +// exposition format. If any metricNames are provided, only metrics with those +// names are compared. +func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { + got, err := g.Gather() + if err != nil { + return fmt.Errorf("gathering metrics failed: %s", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + var tp expfmt.TextParser + wantRaw, err := tp.TextToMetricFamilies(expected) + if err != nil { + return fmt.Errorf("parsing expected metrics failed: %s", err) + } + want := internal.NormalizeMetricFamilies(wantRaw) + + return compare(got, want) +} + +// compare encodes both provided slices of metric families into the text format, +// compares their string message, and returns an error if they do not match. +// The error contains the encoded text of both the desired and the actual +// result. 
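Before the comparison helper itself, a brief hedged sketch of how the CollectAndCompare entry point defined above is typically driven from a test (the package name, the gauge, and the expected exposition text are assumptions of the example):

    package queue_test

    import (
        "strings"
        "testing"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/testutil"
    )

    func TestQueueLengthExposition(t *testing.T) {
        g := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "queue_length",
            Help: "Current number of items in the queue.",
        })
        g.Set(3)

        // Expected output in the Prometheus text exposition format.
        expected := `
# HELP queue_length Current number of items in the queue.
# TYPE queue_length gauge
queue_length 3
`
        if err := testutil.CollectAndCompare(g, strings.NewReader(expected), "queue_length"); err != nil {
            t.Fatal(err)
        }
    }
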
+func compare(got, want []*dto.MetricFamily) error { + var gotBuf, wantBuf bytes.Buffer + enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText) + for _, mf := range got { + if err := enc.Encode(mf); err != nil { + return fmt.Errorf("encoding gathered metrics failed: %s", err) + } + } + enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText) + for _, mf := range want { + if err := enc.Encode(mf); err != nil { + return fmt.Errorf("encoding expected metrics failed: %s", err) + } + } + + if wantBuf.String() != gotBuf.String() { + return fmt.Errorf(` +metric output does not match expectation; want: + +%s +got: + +%s`, wantBuf.String(), gotBuf.String()) + + } + return nil +} + +func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { + var filtered []*dto.MetricFamily + for _, m := range metrics { + for _, name := range names { + if m.GetName() == name { + filtered = append(filtered, m) + break + } + } + } + return filtered +} diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/go.etcd.io/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.etcd.io/etcd/NOTICE b/vendor/go.etcd.io/etcd/NOTICE new file mode 100644 index 0000000000..b39ddfa5cb --- /dev/null +++ b/vendor/go.etcd.io/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go new file mode 100644 index 0000000000..7e038df014 --- /dev/null +++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go @@ -0,0 +1,977 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: auth.proto + +/* + Package authpb is a generated protocol buffer package. 
+ + It is generated from these files: + auth.proto + + It has these top-level messages: + UserAddOptions + User + Permission + Role +*/ +package authpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Permission_Type int32 + +const ( + READ Permission_Type = 0 + WRITE Permission_Type = 1 + READWRITE Permission_Type = 2 +) + +var Permission_Type_name = map[int32]string{ + 0: "READ", + 1: "WRITE", + 2: "READWRITE", +} +var Permission_Type_value = map[string]int32{ + "READ": 0, + "WRITE": 1, + "READWRITE": 2, +} + +func (x Permission_Type) String() string { + return proto.EnumName(Permission_Type_name, int32(x)) +} +func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2, 0} } + +type UserAddOptions struct { + NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"` +} + +func (m *UserAddOptions) Reset() { *m = UserAddOptions{} } +func (m *UserAddOptions) String() string { return proto.CompactTextString(m) } +func (*UserAddOptions) ProtoMessage() {} +func (*UserAddOptions) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } + +// User is a single entry in the bucket authUsers +type User struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` + Options *UserAddOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } + +// Permission is a single entity +type Permission struct { + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } + +// Role is a single entry in the bucket authRoles +type Role struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{3} } + +func init() { + proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions") + 
proto.RegisterType((*User)(nil), "authpb.User") + proto.RegisterType((*Permission)(nil), "authpb.Permission") + proto.RegisterType((*Role)(nil), "authpb.Role") + proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) +} +func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NoPassword { + dAtA[i] = 0x8 + i++ + if m.NoPassword { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Options != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Options.Size())) + n1, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *Permission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PermType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.KeyPermission) > 0 { + for _, msg := range m.KeyPermission { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *UserAddOptions) Size() (n int) { + var l int + _ = l + if m.NoPassword { + n += 2 + } + return n +} + +func (m *User) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = 
len(m.Password) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *Permission) Size() (n int) { + var l int + _ = l + if m.PermType != 0 { + n += 1 + sovAuth(uint64(m.PermType)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.KeyPermission) > 0 { + for _, e := range m.KeyPermission { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + return n +} + +func sovAuth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *UserAddOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoPassword = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) + if m.Password == nil { + m.Password = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) + } + m.PermType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PermType |= (Permission_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPermission = append(m.KeyPermission, &Permission{}) + if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAuth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAuth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } + +var fileDescriptorAuth = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, + 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, + 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, + 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, + 0x45, 0x4c, 
0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, + 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, + 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, + 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, + 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, + 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, + 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, + 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, + 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, + 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, + 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, + 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, + 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, + 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, + 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb, + 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, + 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, + 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/auth/authpb/auth.proto b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto new file mode 100644 index 0000000000..8f82b7cf1e --- /dev/null +++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package authpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message UserAddOptions { + bool no_password = 1; +}; + +// User is a single entry in the bucket authUsers +message User { + bytes name = 1; + bytes password = 2; + repeated string roles = 3; + UserAddOptions options = 4; +} + +// Permission is a single entity +message Permission { + enum Type { + READ = 0; + WRITE = 1; + READWRITE = 2; + } + Type permType = 1; + + bytes key = 2; + bytes range_end = 3; +} + +// Role is a single entry in the bucket authRoles +message Role { + bytes name = 1; + + repeated Permission keyPermission = 2; +} diff --git a/vendor/go.etcd.io/etcd/clientv3/README.md b/vendor/go.etcd.io/etcd/clientv3/README.md new file mode 100644 index 0000000000..6c6fe7c67c --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/README.md @@ -0,0 +1,85 @@ +# etcd/clientv3 + +[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. 
+ +## Install + +```bash +go get go.etcd.io/etcd/clientv3 +``` + +## Get started + +Create client using `clientv3.New`: + +```go +cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, +}) +if err != nil { + // handle error! +} +defer cli.Close() +``` + +etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses +[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. +If the client is not closed, the connection will have leaky goroutines. To specify client request timeout, +pass `context.WithTimeout` to APIs: + +```go +ctx, cancel := context.WithTimeout(context.Background(), timeout) +resp, err := cli.Put(ctx, "sample_key", "sample_value") +cancel() +if err != nil { + // handle error! +} +// use the response +``` + +For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). + +## Error Handling + +etcd client returns 2 types of errors: + +1. context error: canceled or deadline exceeded. +2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes). + +Here is the example code to handle client errors: + +```go +resp, err := cli.Put(ctx, "", "") +if err != nil { + switch err { + case context.Canceled: + log.Fatalf("ctx is canceled by another routine: %v", err) + case context.DeadlineExceeded: + log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) + case rpctypes.ErrEmptyKey: + log.Fatalf("client-side error: %v", err) + default: + log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) + } +} +``` + +## Metrics + +The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go). + +## Namespacing + +The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + +## Request size limit + +Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`. + +## Examples + +More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3). diff --git a/vendor/go.etcd.io/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go new file mode 100644 index 0000000000..c954f1bf47 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/auth.go @@ -0,0 +1,242 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "context" + "fmt" + "strings" + + "go.etcd.io/etcd/auth/authpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" +) + +type ( + AuthEnableResponse pb.AuthEnableResponse + AuthDisableResponse pb.AuthDisableResponse + AuthenticateResponse pb.AuthenticateResponse + AuthUserAddResponse pb.AuthUserAddResponse + AuthUserDeleteResponse pb.AuthUserDeleteResponse + AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse + AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse + AuthUserGetResponse pb.AuthUserGetResponse + AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse + AuthRoleAddResponse pb.AuthRoleAddResponse + AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse + AuthRoleGetResponse pb.AuthRoleGetResponse + AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse + AuthRoleDeleteResponse pb.AuthRoleDeleteResponse + AuthUserListResponse pb.AuthUserListResponse + AuthRoleListResponse pb.AuthRoleListResponse + + PermissionType authpb.Permission_Type + Permission authpb.Permission +) + +const ( + PermRead = authpb.READ + PermWrite = authpb.WRITE + PermReadWrite = authpb.READWRITE +) + +type UserAddOptions authpb.UserAddOptions + +type Auth interface { + // AuthEnable enables auth of an etcd cluster. + AuthEnable(ctx context.Context) (*AuthEnableResponse, error) + + // AuthDisable disables auth of an etcd cluster. + AuthDisable(ctx context.Context) (*AuthDisableResponse, error) + + // UserAdd adds a new user to an etcd cluster. + UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) + + // UserAddWithOptions adds a new user to an etcd cluster with some options. + UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error) + + // UserDelete deletes a user from an etcd cluster. + UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) + + // UserChangePassword changes a password of a user. + UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) + + // UserGrantRole grants a role to a user. + UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) + + // UserGet gets a detailed information of a user. + UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) + + // UserList gets a list of all users. + UserList(ctx context.Context) (*AuthUserListResponse, error) + + // UserRevokeRole revokes a role of a user. + UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) + + // RoleAdd adds a new role to an etcd cluster. + RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) + + // RoleGrantPermission grants a permission to a role. + RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) + + // RoleGet gets a detailed information of a role. + RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) + + // RoleList gets a list of all roles. + RoleList(ctx context.Context) (*AuthRoleListResponse, error) + + // RoleRevokePermission revokes a permission from a role. + RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) + + // RoleDelete deletes a role. 
+ RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) +} + +type authClient struct { + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func NewAuth(c *Client) Auth { + api := &authClient{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) + return (*AuthEnableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) + return (*AuthDisableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) + return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) + return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) + return (*AuthUserGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) + return (*AuthUserListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) + return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) 
+ return (*AuthRoleAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { + perm := &authpb.Permission{ + Key: []byte(key), + RangeEnd: []byte(rangeEnd), + PermType: authpb.Permission_Type(permType), + } + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) + return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) + return (*AuthRoleGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) + return (*AuthRoleListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) + return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) + return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) +} + +func StrToPermissionType(s string) (PermissionType, error) { + val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] + if ok { + return PermissionType(val), nil + } + return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) +} + +type authenticator struct { + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthenticateResponse)(resp), toErr(ctx, err) +} + +func (auth *authenticator) close() { + auth.conn.Close() +} + +func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) { + conn, err := grpc.DialContext(ctx, target, opts...) + if err != nil { + return nil, err + } + + api := &authenticator{ + conn: conn, + remote: pb.NewAuthClient(conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api, nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go new file mode 100644 index 0000000000..d02a7eec7c --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go @@ -0,0 +1,293 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package balancer implements client balancer. +package balancer + +import ( + "strconv" + "sync" + "time" + + "go.etcd.io/etcd/clientv3/balancer/connectivity" + "go.etcd.io/etcd/clientv3/balancer/picker" + + "go.uber.org/zap" + "google.golang.org/grpc/balancer" + grpcconnectivity "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // register DNS resolver + _ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver +) + +// Config defines balancer configurations. +type Config struct { + // Policy configures balancer policy. + Policy picker.Policy + + // Picker implements gRPC picker. + // Leave empty if "Policy" field is not custom. + // TODO: currently custom policy is not supported. + // Picker picker.Picker + + // Name defines an additional name for balancer. + // Useful for balancer testing to avoid register conflicts. + // If empty, defaults to policy name. + Name string + + // Logger configures balancer logging. + // If nil, logs are discarded. + Logger *zap.Logger +} + +// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it +// must be invoked at initialization time. +func RegisterBuilder(cfg Config) { + bb := &builder{cfg} + balancer.Register(bb) + + bb.cfg.Logger.Debug( + "registered balancer", + zap.String("policy", bb.cfg.Policy.String()), + zap.String("name", bb.cfg.Name), + ) +} + +type builder struct { + cfg Config +} + +// Build is called initially when creating "ccBalancerWrapper". +// "grpc.Dial" is called to this client connection. +// Then, resolved addresses will be handled via "HandleResolvedAddrs". +func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bb := &baseBalancer{ + id: strconv.FormatInt(time.Now().UnixNano(), 36), + policy: b.cfg.Policy, + name: b.cfg.Name, + lg: b.cfg.Logger, + + addrToSc: make(map[resolver.Address]balancer.SubConn), + scToAddr: make(map[balancer.SubConn]resolver.Address), + scToSt: make(map[balancer.SubConn]grpcconnectivity.State), + + currentConn: nil, + connectivityRecorder: connectivity.New(b.cfg.Logger), + + // initialize picker always returns "ErrNoSubConnAvailable" + picker: picker.NewErr(balancer.ErrNoSubConnAvailable), + } + + // TODO: support multiple connections + bb.mu.Lock() + bb.currentConn = cc + bb.mu.Unlock() + + bb.lg.Info( + "built balancer", + zap.String("balancer-id", bb.id), + zap.String("policy", bb.policy.String()), + zap.String("resolver-target", cc.Target()), + ) + return bb +} + +// Name implements "grpc/balancer.Builder" interface. +func (b *builder) Name() string { return b.cfg.Name } + +// Balancer defines client balancer interface. +type Balancer interface { + // Balancer is called on specified client connection. Client initiates gRPC + // connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved + // addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs". + // For each resolved address, balancer calls "balancer.ClientConn.NewSubConn". 
+ // "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state + // changes, thus requires failover logic in this method. + balancer.Balancer + + // Picker calls "Pick" for every client request. + picker.Picker +} + +type baseBalancer struct { + id string + policy picker.Policy + name string + lg *zap.Logger + + mu sync.RWMutex + + addrToSc map[resolver.Address]balancer.SubConn + scToAddr map[balancer.SubConn]resolver.Address + scToSt map[balancer.SubConn]grpcconnectivity.State + + currentConn balancer.ClientConn + connectivityRecorder connectivity.Recorder + + picker picker.Picker +} + +// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface. +// gRPC sends initial or updated resolved addresses from "Build". +func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err)) + return + } + bb.lg.Info("resolved", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.Strings("addresses", addrsToStrings(addrs)), + ) + + bb.mu.Lock() + defer bb.mu.Unlock() + + resolved := make(map[resolver.Address]struct{}) + for _, addr := range addrs { + resolved[addr] = struct{}{} + if _, ok := bb.addrToSc[addr]; !ok { + sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + if err != nil { + bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr)) + continue + } + bb.lg.Info("created subconn", zap.String("address", addr.Addr)) + bb.addrToSc[addr] = sc + bb.scToAddr[sc] = addr + bb.scToSt[sc] = grpcconnectivity.Idle + sc.Connect() + } + } + + for addr, sc := range bb.addrToSc { + if _, ok := resolved[addr]; !ok { + // was removed by resolver or failed to create subconn + bb.currentConn.RemoveSubConn(sc) + delete(bb.addrToSc, addr) + + bb.lg.Info( + "removed subconn", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("address", addr.Addr), + zap.String("subconn", scToString(sc)), + ) + + // Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + // (DO NOT) delete(bb.scToAddr, sc) + // (DO NOT) delete(bb.scToSt, sc) + } + } +} + +// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface. +func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) { + bb.mu.Lock() + defer bb.mu.Unlock() + + old, ok := bb.scToSt[sc] + if !ok { + bb.lg.Warn( + "state change for an unknown subconn", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("subconn", scToString(sc)), + zap.Int("subconn-size", len(bb.scToAddr)), + zap.String("state", s.String()), + ) + return + } + + bb.lg.Info( + "state changed", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.Bool("connected", s == grpcconnectivity.Ready), + zap.String("subconn", scToString(sc)), + zap.Int("subconn-size", len(bb.scToAddr)), + zap.String("address", bb.scToAddr[sc].Addr), + zap.String("old-state", old.String()), + zap.String("new-state", s.String()), + ) + + bb.scToSt[sc] = s + switch s { + case grpcconnectivity.Idle: + sc.Connect() + case grpcconnectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scToSt. 
Remove state for this sc here. + delete(bb.scToAddr, sc) + delete(bb.scToSt, sc) + } + + oldAggrState := bb.connectivityRecorder.GetCurrentState() + bb.connectivityRecorder.RecordTransition(old, s) + + // Update balancer picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) || + (bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) { + bb.updatePicker() + } + + bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker) +} + +func (bb *baseBalancer) updatePicker() { + if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure { + bb.picker = picker.NewErr(balancer.ErrTransientFailure) + bb.lg.Info( + "updated picker to transient error picker", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("policy", bb.policy.String()), + ) + return + } + + // only pass ready subconns to picker + scToAddr := make(map[balancer.SubConn]resolver.Address) + for addr, sc := range bb.addrToSc { + if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready { + scToAddr[sc] = addr + } + } + + bb.picker = picker.New(picker.Config{ + Policy: bb.policy, + Logger: bb.lg, + SubConnToResolverAddress: scToAddr, + }) + bb.lg.Info( + "updated picker", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("policy", bb.policy.String()), + zap.Strings("subconn-ready", scsToStrings(scToAddr)), + zap.Int("subconn-size", len(scToAddr)), + ) +} + +// Close implements "grpc/balancer.Balancer" interface. +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (bb *baseBalancer) Close() { + // TODO +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go new file mode 100644 index 0000000000..4c4ad363a7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go @@ -0,0 +1,93 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package connectivity implements client connectivity operations. +package connectivity + +import ( + "sync" + + "go.uber.org/zap" + "google.golang.org/grpc/connectivity" +) + +// Recorder records gRPC connectivity. +type Recorder interface { + GetCurrentState() connectivity.State + RecordTransition(oldState, newState connectivity.State) +} + +// New returns a new Recorder. 
+func New(lg *zap.Logger) Recorder { + return &recorder{lg: lg} +} + +// recorder takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go +type recorder struct { + lg *zap.Logger + + mu sync.RWMutex + + cur connectivity.State + + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +func (rc *recorder) GetCurrentState() (state connectivity.State) { + rc.mu.RLock() + defer rc.mu.RUnlock() + return rc.cur +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +// +// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go +func (rc *recorder) RecordTransition(oldState, newState connectivity.State) { + rc.mu.Lock() + defer rc.mu.Unlock() + + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + rc.numReady += updateVal + case connectivity.Connecting: + rc.numConnecting += updateVal + case connectivity.TransientFailure: + rc.numTransientFailure += updateVal + default: + rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String())) + } + } + + switch { // must be exclusive, no overlap + case rc.numReady > 0: + rc.cur = connectivity.Ready + case rc.numConnecting > 0: + rc.cur = connectivity.Connecting + default: + rc.cur = connectivity.TransientFailure + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go new file mode 100644 index 0000000000..35dabf5532 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package picker defines/implements client balancer picker policy. +package picker diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go new file mode 100644 index 0000000000..f4b941d652 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go @@ -0,0 +1,39 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package picker + +import ( + "context" + + "google.golang.org/grpc/balancer" +) + +// NewErr returns a picker that always returns err on "Pick". +func NewErr(err error) Picker { + return &errPicker{p: Error, err: err} +} + +type errPicker struct { + p Policy + err error +} + +func (ep *errPicker) String() string { + return ep.p.String() +} + +func (ep *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, ep.err +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go new file mode 100644 index 0000000000..bd1a5d25e8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go @@ -0,0 +1,91 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package picker + +import ( + "fmt" + + "go.uber.org/zap" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// Picker defines balancer Picker methods. +type Picker interface { + balancer.Picker + String() string +} + +// Config defines picker configuration. +type Config struct { + // Policy specifies etcd clientv3's built in balancer policy. + Policy Policy + + // Logger defines picker logging object. + Logger *zap.Logger + + // SubConnToResolverAddress maps each gRPC sub-connection to an address. + // Basically, it is a list of addresses that the Picker can pick from. + SubConnToResolverAddress map[balancer.SubConn]resolver.Address +} + +// Policy defines balancer picker policy. +type Policy uint8 + +const ( + // Error is error picker policy. + Error Policy = iota + + // RoundrobinBalanced balances loads over multiple endpoints + // and implements failover in roundrobin fashion. + RoundrobinBalanced + + // Custom defines custom balancer picker. + // TODO: custom picker is not supported yet. + Custom +) + +func (p Policy) String() string { + switch p { + case Error: + return "picker-error" + + case RoundrobinBalanced: + return "picker-roundrobin-balanced" + + case Custom: + panic("'custom' picker policy is not supported yet") + + default: + panic(fmt.Errorf("invalid balancer picker policy (%d)", p)) + } +} + +// New creates a new Picker. 
+func New(cfg Config) Picker { + switch cfg.Policy { + case Error: + panic("'error' picker policy is not supported here; use 'picker.NewErr'") + + case RoundrobinBalanced: + return newRoundrobinBalanced(cfg) + + case Custom: + panic("'custom' picker policy is not supported yet") + + default: + panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy)) + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go new file mode 100644 index 0000000000..e3971ecc42 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go @@ -0,0 +1,95 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package picker + +import ( + "context" + "sync" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// newRoundrobinBalanced returns a new roundrobin balanced picker. +func newRoundrobinBalanced(cfg Config) Picker { + scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress)) + for sc := range cfg.SubConnToResolverAddress { + scs = append(scs, sc) + } + return &rrBalanced{ + p: RoundrobinBalanced, + lg: cfg.Logger, + scs: scs, + scToAddr: cfg.SubConnToResolverAddress, + } +} + +type rrBalanced struct { + p Policy + + lg *zap.Logger + + mu sync.RWMutex + next int + scs []balancer.SubConn + scToAddr map[balancer.SubConn]resolver.Address +} + +func (rb *rrBalanced) String() string { return rb.p.String() } + +// Pick is called for every client request. +func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { + rb.mu.RLock() + n := len(rb.scs) + rb.mu.RUnlock() + if n == 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + rb.mu.Lock() + cur := rb.next + sc := rb.scs[cur] + picked := rb.scToAddr[sc].Addr + rb.next = (rb.next + 1) % len(rb.scs) + rb.mu.Unlock() + + rb.lg.Debug( + "picked", + zap.String("picker", rb.p.String()), + zap.String("address", picked), + zap.Int("subconn-index", cur), + zap.Int("subconn-size", n), + ) + + doneFunc := func(info balancer.DoneInfo) { + // TODO: error handling? + fss := []zapcore.Field{ + zap.Error(info.Err), + zap.String("picker", rb.p.String()), + zap.String("address", picked), + zap.Bool("success", info.Err == nil), + zap.Bool("bytes-sent", info.BytesSent), + zap.Bool("bytes-received", info.BytesReceived), + } + if info.Err == nil { + rb.lg.Debug("balancer done", fss...) + } else { + rb.lg.Warn("balancer failed", fss...) 
+ } + } + return sc, doneFunc, nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go new file mode 100644 index 0000000000..2837bd4180 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go @@ -0,0 +1,247 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package endpoint resolves etcd entpoints using grpc targets of the form 'endpoint:///'. +package endpoint + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + "sync" + + "google.golang.org/grpc/resolver" +) + +const scheme = "endpoint" + +var ( + targetPrefix = fmt.Sprintf("%s://", scheme) + + bldr *builder +) + +func init() { + bldr = &builder{ + resolverGroups: make(map[string]*ResolverGroup), + } + resolver.Register(bldr) +} + +type builder struct { + mu sync.RWMutex + resolverGroups map[string]*ResolverGroup +} + +// NewResolverGroup creates a new ResolverGroup with the given id. +func NewResolverGroup(id string) (*ResolverGroup, error) { + return bldr.newResolverGroup(id) +} + +// ResolverGroup keeps all endpoints of resolvers using a common endpoint:/// target +// up-to-date. +type ResolverGroup struct { + mu sync.RWMutex + id string + endpoints []string + resolvers []*Resolver +} + +func (e *ResolverGroup) addResolver(r *Resolver) { + e.mu.Lock() + addrs := epsToAddrs(e.endpoints...) + e.resolvers = append(e.resolvers, r) + e.mu.Unlock() + r.cc.NewAddress(addrs) +} + +func (e *ResolverGroup) removeResolver(r *Resolver) { + e.mu.Lock() + for i, er := range e.resolvers { + if er == r { + e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...) + break + } + } + e.mu.Unlock() +} + +// SetEndpoints updates the endpoints for ResolverGroup. All registered resolver are updated +// immediately with the new endpoints. +func (e *ResolverGroup) SetEndpoints(endpoints []string) { + addrs := epsToAddrs(endpoints...) + e.mu.Lock() + e.endpoints = endpoints + for _, r := range e.resolvers { + r.cc.NewAddress(addrs) + } + e.mu.Unlock() +} + +// Target constructs a endpoint target using the endpoint id of the ResolverGroup. +func (e *ResolverGroup) Target(endpoint string) string { + return Target(e.id, endpoint) +} + +// Target constructs a endpoint resolver target. +func Target(id, endpoint string) string { + return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint) +} + +// IsTarget checks if a given target string in an endpoint resolver target. +func IsTarget(target string) bool { + return strings.HasPrefix(target, "endpoint://") +} + +func (e *ResolverGroup) Close() { + bldr.close(e.id) +} + +// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target. 
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if len(target.Authority) < 1 { + return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to") + } + id := target.Authority + es, err := b.getResolverGroup(id) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + r := &Resolver{ + endpointID: id, + cc: cc, + } + es.addResolver(r) + return r, nil +} + +func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) { + b.mu.RLock() + _, ok := b.resolverGroups[id] + b.mu.RUnlock() + if ok { + return nil, fmt.Errorf("Endpoint already exists for id: %s", id) + } + + es := &ResolverGroup{id: id} + b.mu.Lock() + b.resolverGroups[id] = es + b.mu.Unlock() + return es, nil +} + +func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) { + b.mu.RLock() + es, ok := b.resolverGroups[id] + b.mu.RUnlock() + if !ok { + return nil, fmt.Errorf("ResolverGroup not found for id: %s", id) + } + return es, nil +} + +func (b *builder) close(id string) { + b.mu.Lock() + delete(b.resolverGroups, id) + b.mu.Unlock() +} + +func (b *builder) Scheme() string { + return scheme +} + +// Resolver provides a resolver for a single etcd cluster, identified by name. +type Resolver struct { + endpointID string + cc resolver.ClientConn + sync.RWMutex +} + +// TODO: use balancer.epsToAddrs +func epsToAddrs(eps ...string) (addrs []resolver.Address) { + addrs = make([]resolver.Address, 0, len(eps)) + for _, ep := range eps { + addrs = append(addrs, resolver.Address{Addr: ep}) + } + return addrs +} + +func (*Resolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (r *Resolver) Close() { + es, err := bldr.getResolverGroup(r.endpointID) + if err != nil { + return + } + es.removeResolver(r) +} + +// ParseEndpoint endpoint parses an endpoint of the form +// (http|https)://*|(unix|unixs)://) +// and returns a protocol ('tcp' or 'unix'), +// host (or filepath if a unix socket), +// scheme (http, https, unix, unixs). +func ParseEndpoint(endpoint string) (proto string, host string, scheme string) { + proto = "tcp" + host = endpoint + url, uerr := url.Parse(endpoint) + if uerr != nil || !strings.Contains(endpoint, "://") { + return proto, host, scheme + } + scheme = url.Scheme + + // strip scheme:// prefix since grpc dials by host + host = url.Host + switch url.Scheme { + case "http", "https": + case "unix", "unixs": + proto = "unix" + host = url.Host + url.Path + default: + proto, host = "", "" + } + return proto, host, scheme +} + +// ParseTarget parses a endpoint:/// string and returns the parsed id and endpoint. +// If the target is malformed, an error is returned. +func ParseTarget(target string) (string, string, error) { + noPrefix := strings.TrimPrefix(target, targetPrefix) + if noPrefix == target { + return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target) + } + parts := strings.SplitN(noPrefix, "/", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("malformed target, expected %s:///, but got %s", scheme, target) + } + return parts[0], parts[1], nil +} + +// Dialer dials a endpoint using net.Dialer. +// Context cancelation and timeout are supported. 
+func Dialer(ctx context.Context, dialEp string) (net.Conn, error) { + proto, host, _ := ParseEndpoint(dialEp) + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + dialer := &net.Dialer{} + if deadline, ok := ctx.Deadline(); ok { + dialer.Deadline = deadline + } + return dialer.DialContext(ctx, proto, host) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go new file mode 100644 index 0000000000..48eb875074 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go @@ -0,0 +1,68 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package balancer + +import ( + "fmt" + "net/url" + "sort" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +func scToString(sc balancer.SubConn) string { + return fmt.Sprintf("%p", sc) +} + +func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) { + ss = make([]string, 0, len(scs)) + for sc, a := range scs { + ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc))) + } + sort.Strings(ss) + return ss +} + +func addrsToStrings(addrs []resolver.Address) (ss []string) { + ss = make([]string, len(addrs)) + for i := range addrs { + ss[i] = addrs[i].Addr + } + sort.Strings(ss) + return ss +} + +func epsToAddrs(eps ...string) (addrs []resolver.Address) { + addrs = make([]resolver.Address, 0, len(eps)) + for _, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend}) + continue + } + addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend}) + } + return addrs +} + +var genN = new(uint32) + +func genName() string { + now := time.Now().UnixNano() + return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1)) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go new file mode 100644 index 0000000000..a35ec679a0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/client.go @@ -0,0 +1,664 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
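The endpoint resolver above keys everything off target strings of the form endpoint://<resolver-group-id>/<endpoint>. Below is a minimal, hedged sketch of how the exported helpers compose and parse such targets; it assumes the vendored go.etcd.io/etcd/clientv3/balancer/resolver/endpoint package is importable, and the id and addresses are made up for illustration.

package main

import (
	"fmt"

	"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
	// Compose an "endpoint://<id>/<endpoint>" target for a hypothetical resolver group.
	target := endpoint.Target("client-1234", "10.0.0.1:2379")
	fmt.Println(target) // endpoint://client-1234/10.0.0.1:2379

	// Split the target back into its resolver-group id and endpoint parts.
	id, ep, err := endpoint.ParseTarget(target)
	fmt.Println(id, ep, err) // client-1234 10.0.0.1:2379 <nil>

	// ParseEndpoint maps the URL scheme onto the proto/host pair used for dialing.
	proto, host, scheme := endpoint.ParseEndpoint("https://etcd.example.com:2379")
	fmt.Println(proto, host, scheme) // tcp etcd.example.com:2379 https
}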
+ +package clientv3 + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/google/uuid" + "go.etcd.io/etcd/clientv3/balancer" + "go.etcd.io/etcd/clientv3/balancer/picker" + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/clientv3/credentials" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/pkg/logutil" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpccredentials "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +var ( + ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") + + roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String()) +) + +func init() { + lg := zap.NewNop() + if os.Getenv("ETCD_CLIENT_DEBUG") != "" { + lcfg := logutil.DefaultZapLoggerConfig + lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + + var err error + lg, err = lcfg.Build() // info level logging + if err != nil { + panic(err) + } + } + + // TODO: support custom balancer + balancer.RegisterBuilder(balancer.Config{ + Policy: picker.RoundrobinBalanced, + Name: roundRobinBalancerName, + Logger: lg, + }) +} + +// Client provides and manages an etcd v3 client session. +type Client struct { + Cluster + KV + Lease + Watcher + Auth + Maintenance + + conn *grpc.ClientConn + + cfg Config + creds grpccredentials.TransportCredentials + resolverGroup *endpoint.ResolverGroup + mu *sync.RWMutex + + ctx context.Context + cancel context.CancelFunc + + // Username is a user name for authentication. + Username string + // Password is a password for authentication. + Password string + authTokenBundle credentials.Bundle + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +// New creates a new etcdv3 client from a given configuration. +func New(cfg Config) (*Client, error) { + if len(cfg.Endpoints) == 0 { + return nil, ErrNoAvailableEndpoints + } + + return newClient(&cfg) +} + +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. +func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + +// NewFromURL creates a new etcdv3 client from a URL. +func NewFromURL(url string) (*Client, error) { + return New(Config{Endpoints: []string{url}}) +} + +// NewFromURLs creates a new etcdv3 client from URLs. +func NewFromURLs(urls []string) (*Client, error) { + return New(Config{Endpoints: urls}) +} + +// Close shuts down the client's etcd connections. +func (c *Client) Close() error { + c.cancel() + if c.Watcher != nil { + c.Watcher.Close() + } + if c.Lease != nil { + c.Lease.Close() + } + if c.resolverGroup != nil { + c.resolverGroup.Close() + } + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() +} + +// Ctx is a context for "out of band" messages (e.g., for sending +// "clean up" message when another context is canceled). It is +// canceled on client Close(). +func (c *Client) Ctx() context.Context { return c.ctx } + +// Endpoints lists the registered endpoints for the client. 
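For context, a short usage sketch of the client lifecycle and endpoint-management methods defined in this file (New, Close, Sync, Endpoints); the address 127.0.0.1:2379 is illustrative and a reachable etcd server is assumed.

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Refresh the client's endpoint list from the current cluster membership.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cli.Sync(ctx); err != nil {
		log.Fatal(err)
	}
	log.Println("endpoints:", cli.Endpoints())
}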
+func (c *Client) Endpoints() []string { + // copy the slice; protect original endpoints from being changed + c.mu.RLock() + defer c.mu.RUnlock() + eps := make([]string, len(c.cfg.Endpoints)) + copy(eps, c.cfg.Endpoints) + return eps +} + +// SetEndpoints updates client's endpoints. +func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() + defer c.mu.Unlock() + c.cfg.Endpoints = eps + c.resolverGroup.SetEndpoints(eps) +} + +// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. +func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + eps = append(eps, m.ClientURLs...) + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { + lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err) + } + } + } +} + +func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) { + creds = c.creds + switch scheme { + case "unix": + case "http": + creds = nil + case "https", "unixs": + if creds != nil { + break + } + creds = credentials.NewBundle(credentials.Config{}).TransportCredentials() + default: + creds = nil + } + return creds +} + +// dialSetupOpts gives the dial opts prior to any authentication. +func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + PermitWithoutStream: c.cfg.PermitWithoutStream, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } + opts = append(opts, dopts...) + + dialer := endpoint.Dialer + if creds != nil { + opts = append(opts, grpc.WithTransportCredentials(creds)) + // gRPC load balancer workaround. See credentials.transportCredential for details. + if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok { + dialer = credsDialer.Dialer + } + } else { + opts = append(opts, grpc.WithInsecure()) + } + opts = append(opts, grpc.WithContextDialer(dialer)) + + // Interceptor retry and backoff. + // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy + // once it is available. + rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) + opts = append(opts, + // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. + // Streams that are safe to retry are enabled individually. + grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)), + grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)), + ) + + return opts, nil +} + +// Dial connects to a single endpoint using the client's config. +func (c *Client) Dial(ep string) (*grpc.ClientConn, error) { + creds, err := c.directDialCreds(ep) + if err != nil { + return nil, err + } + // Use the grpc passthrough resolver to directly dial a single endpoint. 
+ // This resolver passes through the 'unix' and 'unixs' endpoints schemes used + // by etcd without modification, allowing us to directly dial endpoints and + // using the same dial functions that we use for load balancer dialing. + return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds) +} + +func (c *Client) getToken(ctx context.Context) error { + var err error // return last error in a case of fail + var auth *authenticator + + eps := c.Endpoints() + for _, ep := range eps { + // use dial options without dopts to avoid reusing the client balancer + var dOpts []grpc.DialOption + _, host, _ := endpoint.ParseEndpoint(ep) + target := c.resolverGroup.Target(host) + creds := c.dialWithBalancerCreds(ep) + dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...) + if err != nil { + err = fmt.Errorf("failed to configure auth dialer: %v", err) + continue + } + dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName)) + auth, err = newAuthenticator(ctx, target, dOpts, c) + if err != nil { + continue + } + defer auth.close() + + var resp *AuthenticateResponse + resp, err = auth.authenticate(ctx, c.Username, c.Password) + if err != nil { + // return err without retrying other endpoints + if err == rpctypes.ErrAuthNotEnabled { + return err + } + continue + } + + c.authTokenBundle.UpdateAuthToken(resp.Token) + return nil + } + + return err +} + +// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host +// of the provided endpoint determines the scheme used for all endpoints of the client connection. +func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { + _, host, _ := endpoint.ParseEndpoint(ep) + target := c.resolverGroup.Target(host) + creds := c.dialWithBalancerCreds(ep) + return c.dial(target, creds, dopts...) +} + +// dial configures and dials any grpc balancer target. +func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts, err := c.dialSetupOpts(creds, dopts...) + if err != nil { + return nil, fmt.Errorf("failed to configure dialer: %v", err) + } + + if c.Username != "" && c.Password != "" { + c.authTokenBundle = credentials.NewBundle(credentials.Config{}) + + ctx, cancel := c.ctx, func() {} + if c.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) + } + + err = c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = context.DeadlineExceeded + } + cancel() + return nil, err + } + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) + } + cancel() + } + + opts = append(opts, c.cfg.DialOptions...) + + dctx := c.ctx + if c.cfg.DialTimeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options? + } + + conn, err := grpc.DialContext(dctx, target, opts...) 
+ if err != nil { + return nil, err + } + return conn, nil +} + +func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) { + _, host, scheme := endpoint.ParseEndpoint(ep) + creds := c.creds + if len(scheme) != 0 { + creds = c.processCreds(scheme) + if creds != nil { + clone := creds.Clone() + // Set the server name must to the endpoint hostname without port since grpc + // otherwise attempts to check if x509 cert is valid for the full endpoint + // including the scheme and port, which fails. + overrideServerName, _, err := net.SplitHostPort(host) + if err != nil { + // Either the host didn't have a port or the host could not be parsed. Either way, continue with the + // original host string. + overrideServerName = host + } + clone.OverrideServerName(overrideServerName) + creds = clone + } + } + return creds, nil +} + +func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials { + _, _, scheme := endpoint.ParseEndpoint(ep) + creds := c.creds + if len(scheme) != 0 { + creds = c.processCreds(scheme) + } + return creds +} + +func newClient(cfg *Config) (*Client, error) { + if cfg == nil { + cfg = &Config{} + } + var creds grpccredentials.TransportCredentials + if cfg.TLS != nil { + creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials() + } + + // use a temporary skeleton client to bootstrap first connection + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) + client := &Client{ + conn: nil, + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, + mu: new(sync.RWMutex), + callOpts: defaultCallOpts, + } + + lcfg := logutil.DefaultZapLoggerConfig + if cfg.LogConfig != nil { + lcfg = *cfg.LogConfig + } + var err error + client.lg, err = lcfg.Build() + if err != nil { + return nil, err + } + + if cfg.Username != "" && cfg.Password != "" { + client.Username = cfg.Username + client.Password = cfg.Password + } + if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { + if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { + return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) + } + callOpts := []grpc.CallOption{ + defaultFailFast, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, + } + if cfg.MaxCallSendMsgSize > 0 { + callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) + } + if cfg.MaxCallRecvMsgSize > 0 { + callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) + } + client.callOpts = callOpts + } + + // Prepare a 'endpoint:///' resolver for the client and create a endpoint target to pass + // to dial so the client knows to use this resolver. + client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String())) + if err != nil { + client.cancel() + return nil, err + } + client.resolverGroup.SetEndpoints(cfg.Endpoints) + + if len(cfg.Endpoints) < 1 { + return nil, fmt.Errorf("at least one Endpoint must is required in client config") + } + dialEndpoint := cfg.Endpoints[0] + + // Use a provided endpoint target so that for https:// without any tls config given, then + // grpc will assume the certificate server name is the endpoint host. 
+ conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName)) + if err != nil { + client.cancel() + client.resolverGroup.Close() + return nil, err + } + // TODO: With the old grpc balancer interface, we waited until the dial timeout + // for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface? + client.conn = conn + + client.Cluster = NewCluster(client) + client.KV = NewKV(client) + client.Lease = NewLease(client) + client.Watcher = NewWatcher(client) + client.Auth = NewAuth(client) + client.Maintenance = NewMaintenance(client) + + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + + go client.autoSync() + return client, nil +} + +// roundRobinQuorumBackoff retries against quorum between each backoff. +// This is intended for use with a round robin load balancer. +func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc { + return func(attempt uint) time.Duration { + // after each round robin across quorum, backoff for our wait between duration + n := uint(len(c.Endpoints())) + quorum := (n/2 + 1) + if attempt%quorum == 0 { + c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction)) + return jitterUp(waitBetween, jitterFraction) + } + c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum)) + return 0 + } +} + +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + + eps := c.Endpoints() + errc := make(chan error, len(eps)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + cancel() + ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + } + + wg.Add(len(eps)) + for _, ep := range eps { + // if cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + var serr error + if maj, serr = strconv.Atoi(vs[0]); serr != nil { + errc <- serr + return + } + if min, serr = strconv.Atoi(vs[1]); serr != nil { + errc <- serr + return + } + } + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for range eps { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + +// ActiveConnection returns the current in-use connection +func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } + +// isHaltErr returns true if the given error and context indicate no forward +// progress can be made, even after reconnecting. +func isHaltErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return true + } + if err == nil { + return false + } + ev, _ := status.FromError(err) + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + // Treat Internal codes as if something failed, leaving the + // system in an inconsistent state, but retrying could make progress. + // (e.g., failed in middle of send, corrupted frame) + // TODO: are permanent Internal errors possible from grpc? 
+ return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal +} + +// isUnavailableErr returns true if the given error is an unavailable error +func isUnavailableErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return false + } + if err == nil { + return false + } + ev, ok := status.FromError(err) + if ok { + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + return ev.Code() == codes.Unavailable + } + return false +} + +func toErr(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = rpctypes.Error(err) + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + if ev, ok := status.FromError(err); ok { + code := ev.Code() + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + } + } + return err +} + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} + +// IsConnCanceled returns true, if error is from a closed gRPC connection. +// ref. https://github.com/grpc/grpc-go/pull/1854 +func IsConnCanceled(err error) bool { + if err == nil { + return false + } + + // >= gRPC v1.23.x + s, ok := status.FromError(err) + if ok { + // connection is canceled or server has already closed the connection + return s.Code() == codes.Canceled || s.Message() == "transport is closing" + } + + // >= gRPC v1.10.x + if err == context.Canceled { + return true + } + + // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' + return strings.Contains(err.Error(), "grpc: the client connection is closing") +} + +// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details. +type TransportCredentialsWithDialer interface { + grpccredentials.TransportCredentials + Dialer(ctx context.Context, dialEp string) (net.Conn, error) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go new file mode 100644 index 0000000000..ce97e5c85b --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/cluster.go @@ -0,0 +1,141 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" + + "google.golang.org/grpc" +) + +type ( + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse + MemberPromoteResponse pb.MemberPromoteResponse +) + +type Cluster interface { + // MemberList lists the current cluster membership. + MemberList(ctx context.Context) (*MemberListResponse, error) + + // MemberAdd adds a new member into the cluster. 
+ MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberAddAsLearner adds a new learner member into the cluster. + MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) + + // MemberUpdate updates the peer addresses of the member. + MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) +} + +type cluster struct { + remote pb.ClusterClient + callOpts []grpc.CallOption +} + +func NewCluster(c *Client) Cluster { + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, false) +} + +func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, true) +} + +func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + r := &pb.MemberAddRequest{ + PeerURLs: peerAddrs, + IsLearner: isLearner, + } + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberAddResponse)(resp), nil +} + +func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { + r := &pb.MemberRemoveRequest{ID: id} + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberRemoveResponse)(resp), nil +} + +func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + // it is safe to retry on update. + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { + // it is safe to retry on list. + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) + if err == nil { + return (*MemberListResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { + r := &pb.MemberPromoteRequest{ID: id} + resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) 
+ if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberPromoteResponse)(resp), nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/compact_op.go b/vendor/go.etcd.io/etcd/clientv3/compact_op.go new file mode 100644 index 0000000000..5779713d3d --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/compact_op.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" +) + +// CompactOp represents a compact operation. +type CompactOp struct { + revision int64 + physical bool +} + +// CompactOption configures compact operation. +type CompactOption func(*CompactOp) + +func (op *CompactOp) applyCompactOpts(opts []CompactOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpCompact wraps slice CompactOption to create a CompactOp. +func OpCompact(rev int64, opts ...CompactOption) CompactOp { + ret := CompactOp{revision: rev} + ret.applyCompactOpts(opts) + return ret +} + +func (op CompactOp) toRequest() *pb.CompactionRequest { + return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} +} + +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. +func WithCompactPhysical() CompactOption { + return func(op *CompactOp) { op.physical = true } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/compare.go b/vendor/go.etcd.io/etcd/clientv3/compare.go new file mode 100644 index 0000000000..01ed68e942 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/compare.go @@ -0,0 +1,140 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
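As a usage note for the Cluster API above, here is a hedged sketch of listing members and adding a learner member; the client endpoint and peer URL are illustrative only.

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// List the current membership.
	list, err := cli.MemberList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range list.Members {
		log.Printf("member %x: %v", m.ID, m.PeerURLs)
	}

	// Add a new member as a non-voting learner; it can be promoted with
	// MemberPromote once its log has caught up.
	add, err := cli.MemberAddAsLearner(ctx, []string{"http://10.0.0.9:2380"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("added learner member %x", add.Member.ID)
}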
+ +package clientv3 + +import ( + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" +) + +type CompareTarget int +type CompareResult int + +const ( + CompareVersion CompareTarget = iota + CompareCreated + CompareModified + CompareValue +) + +type Cmp pb.Compare + +func Compare(cmp Cmp, result string, v interface{}) Cmp { + var r pb.Compare_CompareResult + + switch result { + case "=": + r = pb.Compare_EQUAL + case "!=": + r = pb.Compare_NOT_EQUAL + case ">": + r = pb.Compare_GREATER + case "<": + r = pb.Compare_LESS + default: + panic("Unknown result op") + } + + cmp.Result = r + switch cmp.Target { + case pb.Compare_VALUE: + val, ok := v.(string) + if !ok { + panic("bad compare value") + } + cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} + case pb.Compare_VERSION: + cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} + case pb.Compare_CREATE: + cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} + case pb.Compare_MOD: + cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} + case pb.Compare_LEASE: + cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} + default: + panic("Unknown compare type") + } + return cmp +} + +func Value(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} +} + +func Version(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} +} + +func CreateRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} +} + +func ModRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_MOD} +} + +// LeaseValue compares a key's LeaseID to a value of your choosing. The empty +// LeaseID is 0, otherwise known as `NoLease`. +func LeaseValue(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_LEASE} +} + +// KeyBytes returns the byte slice holding with the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } + +// WithKeyBytes sets the byte slice for the comparison key. +func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } + +// ValueBytes returns the byte slice holding the comparison value, if any. +func (cmp *Cmp) ValueBytes() []byte { + if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { + return tu.Value + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } + +// WithRange sets the comparison to scan the range [key, end). +func (cmp Cmp) WithRange(end string) Cmp { + cmp.RangeEnd = []byte(end) + return cmp +} + +// WithPrefix sets the comparison to scan all keys prefixed by the key. +func (cmp Cmp) WithPrefix() Cmp { + cmp.RangeEnd = getPrefix(cmp.Key) + return cmp +} + +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. +func mustInt64(val interface{}) int64 { + if v, ok := val.(int64); ok { + return v + } + if v, ok := val.(int); ok { + return int64(v) + } + panic("bad value") +} + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. 
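The Cmp builders above are normally consumed through transactions. A hedged sketch follows, using the Txn, OpPut and OpGet helpers from elsewhere in this clientv3 package; the key name "config" and the endpoint are illustrative.

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Write "config" only if it has never been created (CreateRevision == 0);
	// otherwise read the existing value back.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision("config"), "=", 0)).
		Then(clientv3.OpPut("config", "v1")).
		Else(clientv3.OpGet("config")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("txn succeeded:", resp.Succeeded)
}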
+func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go new file mode 100644 index 0000000000..11d447d575 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/config.go @@ -0,0 +1,88 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "crypto/tls" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type Config struct { + // Endpoints is a list of URLs. + Endpoints []string `json:"endpoints"` + + // AutoSyncInterval is the interval to update endpoints with its latest members. + // 0 disables auto-sync. By default auto-sync is disabled. + AutoSyncInterval time.Duration `json:"auto-sync-interval"` + + // DialTimeout is the timeout for failing to establish a connection. + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int + + // TLS holds the client secure credentials, if any. + TLS *tls.Config + + // Username is a user name for authentication. + Username string `json:"username"` + + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + // For example, pass "grpc.WithBlock()" to block until the underlying connection is up. + // Without this, Dial returns immediately and connecting the server happens in background. + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context + + // LogConfig configures client-side logger. 
+ // If nil, use the default logger. + // TODO: configure gRPC logger + LogConfig *zap.Config + + // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). + PermitWithoutStream bool `json:"permit-without-stream"` + + // TODO: support custom balancer picker +} diff --git a/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go new file mode 100644 index 0000000000..63389c08bf --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go @@ -0,0 +1,173 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials implements gRPC credential interface with etcd specific logic. +// e.g., client handshake with custom authority parameter +package credentials + +import ( + "context" + "crypto/tls" + "net" + "sync" + + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + grpccredentials "google.golang.org/grpc/credentials" +) + +// Config defines gRPC credential configuration. +type Config struct { + TLSConfig *tls.Config +} + +// Bundle defines gRPC credential interface. +type Bundle interface { + grpccredentials.Bundle + UpdateAuthToken(token string) +} + +// NewBundle constructs a new gRPC credential bundle. +func NewBundle(cfg Config) Bundle { + return &bundle{ + tc: newTransportCredential(cfg.TLSConfig), + rc: newPerRPCCredential(), + } +} + +// bundle implements "grpccredentials.Bundle" interface. +type bundle struct { + tc *transportCredential + rc *perRPCCredential +} + +func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials { + return b.tc +} + +func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { + return b.rc +} + +func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) { + // no-op + return nil, nil +} + +// transportCredential implements "grpccredentials.TransportCredentials" interface. +// transportCredential wraps TransportCredentials to track which +// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the +// hostname or IP of the dialed endpoint. +// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when +// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name +// in their TLS certs. +// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer) +// when dialing. 
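To tie the Config fields above together, a hedged example of a fuller client configuration; every endpoint, credential and interval is illustrative, and the empty tls.Config stands in for a real certificate setup.

package main

import (
	"crypto/tls"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"google.golang.org/grpc"
)

func main() {
	cfg := clientv3.Config{
		Endpoints:            []string{"10.0.0.1:2379", "10.0.0.2:2379", "10.0.0.3:2379"},
		AutoSyncInterval:     5 * time.Minute,
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    30 * time.Second,
		DialKeepAliveTimeout: 10 * time.Second,
		PermitWithoutStream:  true,
		Username:             "example-user",
		Password:             "example-password",
		TLS:                  &tls.Config{}, // replace with real certificate configuration
		// Block New until the underlying connection is up.
		DialOptions: []grpc.DialOption{grpc.WithBlock()},
	}

	cli, err := clientv3.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println("connected to", cli.Endpoints())
}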
+type transportCredential struct { + gtc grpccredentials.TransportCredentials + mu sync.Mutex + // addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the + // endpoint provided to the dialer when dialing + addrToEndpoint map[string]string +} + +func newTransportCredential(cfg *tls.Config) *transportCredential { + return &transportCredential{ + gtc: grpccredentials.NewTLS(cfg), + addrToEndpoint: map[string]string{}, + } +} + +func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { + // Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint + tc.mu.Lock() + dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()] + tc.mu.Unlock() + if ok { + _, host, _ := endpoint.ParseEndpoint(dialEp) + authority = host + } + return tc.gtc.ClientHandshake(ctx, authority, rawConn) +} + +// return true if given string is an IP. +func isIP(ep string) bool { + return net.ParseIP(ep) != nil +} + +func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { + return tc.gtc.ServerHandshake(rawConn) +} + +func (tc *transportCredential) Info() grpccredentials.ProtocolInfo { + return tc.gtc.Info() +} + +func (tc *transportCredential) Clone() grpccredentials.TransportCredentials { + copy := map[string]string{} + tc.mu.Lock() + for k, v := range tc.addrToEndpoint { + copy[k] = v + } + tc.mu.Unlock() + return &transportCredential{ + gtc: tc.gtc.Clone(), + addrToEndpoint: copy, + } +} + +func (tc *transportCredential) OverrideServerName(serverNameOverride string) error { + return tc.gtc.OverrideServerName(serverNameOverride) +} + +func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) { + // Keep track of which addresses are dialed for which endpoints + conn, err := endpoint.Dialer(ctx, dialEp) + if conn != nil { + tc.mu.Lock() + tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp + tc.mu.Unlock() + } + return conn, err +} + +// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface. +type perRPCCredential struct { + authToken string + authTokenMu sync.RWMutex +} + +func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} } + +func (rc *perRPCCredential) RequireTransportSecurity() bool { return false } + +func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { + rc.authTokenMu.RLock() + authToken := rc.authToken + rc.authTokenMu.RUnlock() + return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil +} + +func (b *bundle) UpdateAuthToken(token string) { + if b.rc == nil { + return + } + b.rc.UpdateAuthToken(token) +} + +func (rc *perRPCCredential) UpdateAuthToken(token string) { + rc.authTokenMu.Lock() + rc.authToken = token + rc.authTokenMu.Unlock() +} diff --git a/vendor/go.etcd.io/etcd/clientv3/ctx.go b/vendor/go.etcd.io/etcd/clientv3/ctx.go new file mode 100644 index 0000000000..542219837b --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/ctx.go @@ -0,0 +1,64 @@ +// Copyright 2020 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "strings" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/version" + "google.golang.org/grpc/metadata" +) + +// WithRequireLeader requires client requests to only succeed +// when the cluster has a leader. +func WithRequireLeader(ctx context.Context) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { // no outgoing metadata ctx key, create one + md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, md) + } + copied := md.Copy() // avoid racey updates + // overwrite/add 'hasleader' key/value + metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, copied) +} + +// embeds client version +func withVersion(ctx context.Context) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { // no outgoing metadata ctx key, create one + md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion) + return metadata.NewOutgoingContext(ctx, md) + } + copied := md.Copy() // avoid racey updates + // overwrite/add version key/value + metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion) + return metadata.NewOutgoingContext(ctx, copied) +} + +func metadataGet(md metadata.MD, k string) []string { + k = strings.ToLower(k) + return md[k] +} + +func metadataSet(md metadata.MD, k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} diff --git a/vendor/go.etcd.io/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go new file mode 100644 index 0000000000..913cd28255 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/doc.go @@ -0,0 +1,106 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package clientv3 implements the official Go etcd client for v3. 
+// +// Create client using `clientv3.New`: +// +// // expect dial time-out on ipv4 blackhole +// _, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"http://254.0.0.1:12345"}, +// DialTimeout: 2 * time.Second, +// }) +// +// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 +// if err == context.DeadlineExceeded { +// // handle errors +// } +// +// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1 +// if err == grpc.ErrClientConnTimeout { +// // handle errors +// } +// +// cli, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, +// DialTimeout: 5 * time.Second, +// }) +// if err != nil { +// // handle error! +// } +// defer cli.Close() +// +// Make sure to close the client after using it. If the client is not closed, the +// connection will have leaky goroutines. +// +// To specify a client request timeout, wrap the context with context.WithTimeout: +// +// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// resp, err := kvc.Put(ctx, "sample_key", "sample_value") +// cancel() +// if err != nil { +// // handle error! +// } +// // use the response +// +// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. +// Clients are safe for concurrent use by multiple goroutines. +// +// etcd client returns 3 types of errors: +// +// 1. context error: canceled or deadline exceeded. +// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded. +// 3. gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go +// +// Here is the example code to handle client errors: +// +// resp, err := kvc.Put(ctx, "", "") +// if err != nil { +// if err == context.Canceled { +// // ctx is canceled by another routine +// } else if err == context.DeadlineExceeded { +// // ctx is attached with a deadline and it exceeded +// } else if err == rpctypes.ErrEmptyKey { +// // client-side error: key is not provided +// } else if ev, ok := status.FromError(err); ok { +// code := ev.Code() +// if code == codes.DeadlineExceeded { +// // server-side context might have timed-out first (due to clock skew) +// // while original client-side context is not timed-out yet +// } +// } else { +// // bad cluster endpoints, which are not etcd servers +// } +// } +// +// go func() { cli.Close() }() +// _, err := kvc.Get(ctx, "a") +// if err != nil { +// // with etcd clientv3 <= v3.3 +// if err == context.Canceled { +// // grpc balancer calls 'Get' with an inflight client.Close +// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x +// // grpc balancer calls 'Get' after client.Close. +// } +// // with etcd clientv3 >= v3.4 +// if clientv3.IsConnCanceled(err) { +// // gRPC client connection is closed +// } +// } +// +// The grpc load balancer is registered statically and is shared across etcd clients. +// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment +// variable. E.g. "ETCD_CLIENT_DEBUG=1". +// +package clientv3 diff --git a/vendor/go.etcd.io/etcd/clientv3/kv.go b/vendor/go.etcd.io/etcd/clientv3/kv.go new file mode 100644 index 0000000000..2b7864ad8b --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/kv.go @@ -0,0 +1,177 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + CompactResponse pb.CompactionResponse + PutResponse pb.PutResponse + GetResponse pb.RangeResponse + DeleteResponse pb.DeleteRangeResponse + TxnResponse pb.TxnResponse +) + +type KV interface { + // Put puts a key-value pair into etcd. + // Note that key,value can be plain bytes array and string is + // an immutable representation of that bytes array. + // To get a string of bytes, do string([]byte{0x10, 0x20}). + Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) + + // Get retrieves keys. + // By default, Get will return the value for "key", if any. + // When passed WithRange(end), Get will return the keys in the range [key, end). + // When passed WithFromKey(), Get returns keys greater than or equal to key. + // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; + // if the required revision is compacted, the request will fail with ErrCompacted . + // When passed WithLimit(limit), the number of returned keys is bounded by limit. + // When passed WithSort(), the keys will be sorted. + Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) + + // Delete deletes a key, or optionally using WithRange(end), [key, end). + Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) + + // Compact compacts etcd KV history before the given rev. + Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) + + // Do applies a single Op on KV without a transaction. + // Do is useful when creating arbitrary operations to be issued at a + // later time; the user can range over the operations, calling Do to + // execute them. Get/Put/Delete, on the other hand, are best suited + // for when the operation should be issued at the time of declaration. + Do(ctx context.Context, op Op) (OpResponse, error) + + // Txn creates a transaction. 
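Illustration only, not part of the vendored file: a compare-and-swap style transaction built with the Txn method declared just below. The Cmp helpers (clientv3.Compare, clientv3.CreateRevision) live in compare.go/txn.go, which are not shown in this excerpt; key, value, and helper names are placeholders.

	package example

	import (
		"context"

		"go.etcd.io/etcd/clientv3"
	)

	// putIfAbsent creates key only if it does not exist yet and reports
	// whether the put was applied.
	func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
		resp, err := cli.Txn(ctx).
			// CreateRevision == 0 means the key does not exist yet.
			If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
			Then(clientv3.OpPut(key, val)).
			Else(clientv3.OpGet(key)).
			Commit()
		if err != nil {
			return false, err
		}
		return resp.Succeeded, nil
	}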
+ Txn(ctx context.Context) Txn +} + +type OpResponse struct { + put *PutResponse + get *GetResponse + del *DeleteResponse + txn *TxnResponse +} + +func (op OpResponse) Put() *PutResponse { return op.put } +func (op OpResponse) Get() *GetResponse { return op.get } +func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} + +type kv struct { + remote pb.KVClient + callOpts []grpc.CallOption +} + +func NewKV(c *Client) KV { + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { + r, err := kv.Do(ctx, OpPut(key, val, opts...)) + return r.put, toErr(ctx, err) +} + +func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { + r, err := kv.Do(ctx, OpGet(key, opts...)) + return r.get, toErr(ctx, err) +} + +func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { + r, err := kv.Do(ctx, OpDelete(key, opts...)) + return r.del, toErr(ctx, err) +} + +func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*CompactResponse)(resp), err +} + +func (kv *kv) Txn(ctx context.Context) Txn { + return &txn{ + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, + } +} + +func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + var err error + switch op.t { + case tRange: + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) + if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + case tPut: + var resp *pb.PutResponse + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{put: (*PutResponse)(resp)}, nil + } + case tDeleteRange: + var resp *pb.DeleteRangeResponse + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{del: (*DeleteResponse)(resp)}, nil + } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } + default: + panic("Unknown op") + } + return OpResponse{}, toErr(ctx, err) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go new file mode 100644 index 0000000000..c2796fc969 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/lease.go @@ -0,0 +1,596 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "sync" + "time" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type ( + LeaseRevokeResponse pb.LeaseRevokeResponse + LeaseID int64 +) + +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. +type LeaseGrantResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 + Error string +} + +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. +type LeaseKeepAliveResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 +} + +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. + Keys [][]byte `json:"keys"` +} + +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + +// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. +type LeaseLeasesResponse struct { + *pb.ResponseHeader + Leases []LeaseStatus `json:"leases"` +} + +const ( + // defaultTTL is the assumed lease TTL used for the first keepalive + // deadline before the actual TTL is known to the client. + defaultTTL = 5 * time.Second + // NoLease is a lease ID for the absence of a lease. + NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond +) + +// LeaseResponseChSize is the size of buffer to store unsent lease responses. +// WARNING: DO NOT UPDATE. +// Only for testing purposes. +var LeaseResponseChSize = 16 + +// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. +// +// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. +type ErrKeepAliveHalted struct { + Reason error +} + +func (e ErrKeepAliveHalted) Error() string { + s := "etcdclient: leases keep alive halted" + if e.Reason != nil { + s += ": " + e.Reason.Error() + } + return s +} + +type Lease interface { + // Grant creates a new lease. 
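A minimal usage sketch (not part of the vendored source; the key name, TTL value, and helper name are assumptions): grant a lease with the Grant method declared just below, attach it to a key, and keep it alive with KeepAlive.

	package example

	import (
		"context"
		"fmt"

		"go.etcd.io/etcd/clientv3"
	)

	func keepKeyAlive(ctx context.Context, cli *clientv3.Client) error {
		// Grant a lease with a 10-second TTL (value is illustrative).
		grant, err := cli.Grant(ctx, 10)
		if err != nil {
			return err
		}

		// Attach the lease to a key; the key disappears when the lease expires.
		if _, err := cli.Put(ctx, "service/instance-1", "alive", clientv3.WithLease(grant.ID)); err != nil {
			return err
		}

		// Keep the lease alive; responses must be drained, otherwise the
		// buffered channel (LeaseResponseChSize) fills and responses are dropped.
		ch, err := cli.KeepAlive(ctx, grant.ID)
		if err != nil {
			return err
		}
		go func() {
			for ka := range ch {
				fmt.Println("lease renewed, remaining TTL:", ka.TTL)
			}
			fmt.Println("keepalive channel closed")
		}()
		return nil
	}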
+ Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) + + // Revoke revokes the given lease. + Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + + // TimeToLive retrieves the lease information of the given lease ID. + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + + // Leases retrieves all leases. + Leases(ctx context.Context) (*LeaseLeasesResponse, error) + + // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted + // to the channel are not consumed promptly the channel may become full. When full, the lease + // client will continue sending keep alive requests to the etcd server, but will drop responses + // until there is capacity on the channel to send more responses. + // + // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or + // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error + // containing the error reason. + // + // The returned "LeaseKeepAliveResponse" channel closes if underlying keep + // alive stream is interrupted in some way the client cannot handle itself; + // given context "ctx" is canceled or timed out. + // + // TODO(v4.0): post errors to last keep alive message before closing + // (see https://github.com/etcd-io/etcd/pull/7866) + KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) + + // KeepAliveOnce renews the lease once. The response corresponds to the + // first message from calling KeepAlive. If the response has a recoverable + // error, KeepAliveOnce will retry the RPC with a new keep alive message. + // + // In most of the cases, Keepalive should be used instead of KeepAliveOnce. + KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) + + // Close releases all resources Lease keeps for efficient communication + // with the etcd server. + Close() error +} + +type lessor struct { + mu sync.Mutex // guards all fields + + // donec is closed and loopErr is set when recvKeepAliveLoop stops + donec chan struct{} + loopErr error + + remote pb.LeaseClient + + stream pb.Lease_LeaseKeepAliveClient + streamCancel context.CancelFunc + + stopCtx context.Context + stopCancel context.CancelFunc + + keepAlives map[LeaseID]*keepAlive + + // firstKeepAliveTimeout is the timeout for the first keepalive request + // before the actual TTL is known to the lease client + firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. + firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +// keepAlive multiplexes a keepalive for a lease over multiple channels +type keepAlive struct { + chs []chan<- *LeaseKeepAliveResponse + ctxs []context.Context + // deadline is the time the keep alive channels close if no response + deadline time.Time + // nextKeepAlive is when to send the next keep alive message + nextKeepAlive time.Time + // donec is closed on lease revoke, expiration, or cancel. 
+ donec chan struct{} +} + +func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { + l := &lessor{ + donec: make(chan struct{}), + keepAlives: make(map[LeaseID]*keepAlive), + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, + lg: c.lg, + } + if l.firstKeepAliveTimeout == time.Second { + l.firstKeepAliveTimeout = defaultTTL + } + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + return l +} + +func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { + resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) 
+ if err == nil { + leases := make([]LeaseStatus, len(resp.Leases)) + for i := range resp.Leases { + leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} + } + return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { + ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) + + l.mu.Lock() + // ensure that recvKeepAliveLoop is still running + select { + case <-l.donec: + err := l.loopErr + l.mu.Unlock() + close(ch) + return ch, ErrKeepAliveHalted{Reason: err} + default: + } + ka, ok := l.keepAlives[id] + if !ok { + // create fresh keep alive + ka = &keepAlive{ + chs: []chan<- *LeaseKeepAliveResponse{ch}, + ctxs: []context.Context{ctx}, + deadline: time.Now().Add(l.firstKeepAliveTimeout), + nextKeepAlive: time.Now(), + donec: make(chan struct{}), + } + l.keepAlives[id] = ka + } else { + // add channel and context to existing keep alive + ka.ctxs = append(ka.ctxs, ctx) + ka.chs = append(ka.chs, ch) + } + l.mu.Unlock() + + go l.keepAliveCtxCloser(ctx, id, ka.donec) + l.firstKeepAliveOnce.Do(func() { + go l.recvKeepAliveLoop() + go l.deadlineLoop() + }) + + return ch, nil +} + +func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + for { + resp, err := l.keepAliveOnce(ctx, id) + if err == nil { + if resp.TTL <= 0 { + err = rpctypes.ErrLeaseNotFound + } + return resp, err + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } + } +} + +func (l *lessor) Close() error { + l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) + <-l.donec + return nil +} + +func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { + select { + case <-donec: + return + case <-l.donec: + return + case <-ctx.Done(): + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[id] + if !ok { + return + } + + // close channel and remove context if still associated with keep alive + for i, c := range ka.ctxs { + if c == ctx { + close(ka.chs[i]) + ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) + ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) + break + } + } + // remove if no one more listeners + if len(ka.chs) == 0 { + delete(l.keepAlives, id) + } +} + +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. 
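Illustrative sketch of the caller-visible effect of closeRequireLeader (implemented below): a KeepAlive started with a WithRequireLeader context has its channel closed when the cluster loses its leader, instead of silently stalling. The helper name is made up and error handling is abbreviated.

	package example

	import (
		"context"
		"log"

		"go.etcd.io/etcd/clientv3"
	)

	func keepAliveRequiringLeader(cli *clientv3.Client, id clientv3.LeaseID) {
		ctx := clientv3.WithRequireLeader(context.Background())
		ch, err := cli.KeepAlive(ctx, id)
		if err != nil {
			log.Fatal(err)
		}
		for range ch {
			// consume renewals
		}
		// Reached when the lease expires or is revoked, the context is
		// canceled, or the cluster has no leader and the require-leader
		// channels are closed.
		log.Println("keepalive stopped")
	}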
+func (l *lessor) closeRequireLeader() { + l.mu.Lock() + defer l.mu.Unlock() + for _, ka := range l.keepAlives { + reqIdxs := 0 + // find all required leader channels, close, mark as nil + for i, ctx := range ka.ctxs { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + continue + } + ks := md[rpctypes.MetadataRequireLeaderKey] + if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { + continue + } + close(ka.chs[i]) + ka.chs[i] = nil + reqIdxs++ + } + if reqIdxs == 0 { + continue + } + // remove all channels that required a leader from keepalive + newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) + newCtxs := make([]context.Context, len(newChs)) + newIdx := 0 + for i := range ka.chs { + if ka.chs[i] == nil { + continue + } + newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] + newIdx++ + } + ka.chs, ka.ctxs = newChs, newCtxs + } +} + +func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + + err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) + if err != nil { + return nil, toErr(ctx, err) + } + + resp, rerr := stream.Recv() + if rerr != nil { + return nil, toErr(ctx, rerr) + } + + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + return karesp, nil +} + +func (l *lessor) recvKeepAliveLoop() (gerr error) { + defer func() { + l.mu.Lock() + close(l.donec) + l.loopErr = gerr + for _, ka := range l.keepAlives { + ka.close() + } + l.keepAlives = make(map[LeaseID]*keepAlive) + l.mu.Unlock() + }() + + for { + stream, err := l.resetRecv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + } else { + for { + resp, err := stream.Recv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): + case <-l.stopCtx.Done(): + return l.stopCtx.Err() + } + } +} + +// resetRecv opens a new lease stream and starts sending keep alive requests. +func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { + sctx, cancel := context.WithCancel(l.stopCtx) + stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
+ if err != nil { + cancel() + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stream != nil && l.streamCancel != nil { + l.streamCancel() + } + + l.streamCancel = cancel + l.stream = stream + + go l.sendKeepAliveLoop(stream) + return stream, nil +} + +// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse +func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[karesp.ID] + if !ok { + return + } + + if karesp.TTL <= 0 { + // lease expired; close all keep alive channels + delete(l.keepAlives, karesp.ID) + ka.close() + return + } + + // send update to all channels + nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) + ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) + for _, ch := range ka.chs { + select { + case ch <- karesp: + default: + if l.lg != nil { + l.lg.Warn("lease keepalive response queue is full; dropping response send", + zap.Int("queue-size", len(ch)), + zap.Int("queue-capacity", cap(ch)), + ) + } + } + // still advance in order to rate-limit keep-alive sends + ka.nextKeepAlive = nextKeepAlive + } +} + +// deadlineLoop reaps any keep alive channels that have not received a response +// within the lease TTL +func (l *lessor) deadlineLoop() { + for { + select { + case <-time.After(time.Second): + case <-l.donec: + return + } + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.deadline.Before(now) { + // waited too long for response; lease may be expired + ka.close() + delete(l.keepAlives, id) + } + } + l.mu.Unlock() + } +} + +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. +func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { + for { + var tosend []LeaseID + + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.nextKeepAlive.Before(now) { + tosend = append(tosend, id) + } + } + l.mu.Unlock() + + for _, id := range tosend { + r := &pb.LeaseKeepAliveRequest{ID: int64(id)} + if err := stream.Send(r); err != nil { + // TODO do something with this error? + return + } + } + + select { + case <-time.After(retryConnWait): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } + } +} + +func (ka *keepAlive) close() { + close(ka.donec) + for _, ch := range ka.chs { + close(ch) + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/logger.go b/vendor/go.etcd.io/etcd/clientv3/logger.go new file mode 100644 index 0000000000..f5ae0109da --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/logger.go @@ -0,0 +1,101 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "io/ioutil" + "sync" + + "go.etcd.io/etcd/pkg/logutil" + + "google.golang.org/grpc/grpclog" +) + +var ( + lgMu sync.RWMutex + lg logutil.Logger +) + +type settableLogger struct { + l grpclog.LoggerV2 + mu sync.RWMutex +} + +func init() { + // disable client side logs by default + lg = &settableLogger{} + SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) +} + +// SetLogger sets client-side Logger. +func SetLogger(l grpclog.LoggerV2) { + lgMu.Lock() + lg = logutil.NewLogger(l) + // override grpclog so that any changes happen with locking + grpclog.SetLoggerV2(lg) + lgMu.Unlock() +} + +// GetLogger returns the current logutil.Logger. +func GetLogger() logutil.Logger { + lgMu.RLock() + l := lg + lgMu.RUnlock() + return l +} + +// NewLogger returns a new Logger with logutil.Logger. +func NewLogger(gl grpclog.LoggerV2) logutil.Logger { + return &settableLogger{l: gl} +} + +func (s *settableLogger) get() grpclog.LoggerV2 { + s.mu.RLock() + l := s.l + s.mu.RUnlock() + return l +} + +// implement the grpclog.LoggerV2 interface + +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } +func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } +func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } +func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } +func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 { + s.mu.RLock() + l := s.l + s.mu.RUnlock() + if l.V(lvl) { + return s + } + return logutil.NewDiscardLogger() +} diff --git a/vendor/go.etcd.io/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go new file mode 100644 index 0000000000..809b8a3b4b --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/maintenance.go @@ -0,0 +1,243 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
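Client-side logs are discarded by default (see the init function in logger.go above). A small sketch, not part of this patch, of turning them on and off again through the SetLogger API; writing to os.Stderr is just an example choice.

	package example

	import (
		"io/ioutil"
		"os"

		"go.etcd.io/etcd/clientv3"
		"google.golang.org/grpc/grpclog"
	)

	func enableClientLogs() {
		// Route info/warning/error output to stderr instead of discarding it.
		clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
	}

	func disableClientLogs() {
		// Back to the package default: everything discarded.
		clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
	}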
+ +package clientv3 + +import ( + "context" + "fmt" + "io" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.uber.org/zap" + + "google.golang.org/grpc" +) + +type ( + DefragmentResponse pb.DefragmentResponse + AlarmResponse pb.AlarmResponse + AlarmMember pb.AlarmMember + StatusResponse pb.StatusResponse + HashKVResponse pb.HashKVResponse + MoveLeaderResponse pb.MoveLeaderResponse +) + +type Maintenance interface { + // AlarmList gets all active alarms. + AlarmList(ctx context.Context) (*AlarmResponse, error) + + // AlarmDisarm disarms a given alarm. + AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) + + // Defragment releases wasted space from internal fragmentation on a given etcd member. + // Defragment is only needed when deleting a large number of keys and want to reclaim + // the resources. + // Defragment is an expensive operation. User should avoid defragmenting multiple members + // at the same time. + // To defragment multiple members in the cluster, user need to call defragment multiple + // times with different endpoints. + Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) + + // Status gets the status of the endpoint. + Status(ctx context.Context, endpoint string) (*StatusResponse, error) + + // HashKV returns a hash of the KV state at the time of the RPC. + // If revision is zero, the hash is computed on all keys. If the revision + // is non-zero, the hash is computed on all keys at or below the given revision. + HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) + + // Snapshot provides a reader for a point-in-time snapshot of etcd. + // If the context "ctx" is canceled or timed out, reading from returned + // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). + Snapshot(ctx context.Context) (io.ReadCloser, error) + + // MoveLeader requests current leader to transfer its leadership to the transferee. + // Request must be made to the leader. + MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) +} + +type maintenance struct { + lg *zap.Logger + dial func(endpoint string) (pb.MaintenanceClient, func(), error) + remote pb.MaintenanceClient + callOpts []grpc.CallOption +} + +func NewMaintenance(c *Client) Maintenance { + api := &maintenance{ + lg: c.lg, + dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { + conn, err := c.Dial(endpoint) + if err != nil { + return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) + } + cancel := func() { conn.Close() } + return RetryMaintenanceClient(c, conn), cancel, nil + }, + remote: RetryMaintenanceClient(c, c.conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { + api := &maintenance{ + lg: c.lg, + dial: func(string) (pb.MaintenanceClient, func(), error) { + return remote, func() {}, nil + }, + remote: remote, + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_GET, + MemberID: 0, // all + Alarm: pb.AlarmType_NONE, // all + } + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) 
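A hedged sketch (not part of the vendored file; the helper name is made up) of the per-endpoint Maintenance calls described in the interface above: query Status for each endpoint, then Defragment members one at a time, as the Defragment documentation advises.

	package example

	import (
		"context"
		"fmt"

		"go.etcd.io/etcd/clientv3"
	)

	func defragAll(ctx context.Context, cli *clientv3.Client) error {
		for _, ep := range cli.Endpoints() {
			status, err := cli.Status(ctx, ep)
			if err != nil {
				return err
			}
			fmt.Printf("%s: version=%s dbSize=%d\n", ep, status.Version, status.DbSize)

			// Defragment is expensive; run it one member at a time.
			if _, err := cli.Defragment(ctx, ep); err != nil {
				return err
			}
		}
		return nil
	}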
+ if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_DEACTIVATE, + MemberID: am.MemberID, + Alarm: am.Alarm, + } + + if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { + ar, err := m.AlarmList(ctx) + if err != nil { + return nil, toErr(ctx, err) + } + ret := AlarmResponse{} + for _, am := range ar.Alarms { + dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) + if derr != nil { + return nil, toErr(ctx, derr) + } + ret.Alarms = append(ret.Alarms, dresp.Alarms...) + } + return &ret, nil + } + + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) + if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*DefragmentResponse)(resp), nil +} + +func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*StatusResponse)(resp), nil +} + +func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*HashKVResponse)(resp), nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) + if err != nil { + return nil, toErr(ctx, err) + } + + m.lg.Info("opened snapshot stream; downloading") + pr, pw := io.Pipe() + go func() { + for { + resp, err := ss.Recv() + if err != nil { + switch err { + case io.EOF: + m.lg.Info("completed snapshot read; closing") + default: + m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) + } + pw.CloseWithError(err) + return + } + + // can "resp == nil && err == nil" + // before we receive snapshot SHA digest? + // No, server sends EOF with an empty response + // after it sends SHA digest at the end + + if _, werr := pw.Write(resp.Blob); werr != nil { + pw.CloseWithError(werr) + return + } + } + }() + return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil +} + +type snapshotReadCloser struct { + ctx context.Context + io.ReadCloser +} + +func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { + n, err = rc.ReadCloser.Read(p) + return n, toErr(rc.ctx, err) +} + +func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { + resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) 
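Snapshot returns a plain io.ReadCloser, so saving a backup is just an io.Copy into a file. A sketch under the assumption of a caller-supplied local path; not taken from this patch.

	package example

	import (
		"context"
		"io"
		"os"

		"go.etcd.io/etcd/clientv3"
	)

	func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) error {
		rc, err := cli.Snapshot(ctx)
		if err != nil {
			return err
		}
		defer rc.Close()

		f, err := os.Create(path)
		if err != nil {
			return err
		}
		defer f.Close()

		// Stream the snapshot body; stream errors surface from the reader.
		_, err = io.Copy(f, rc)
		return err
	}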
+ return (*MoveLeaderResponse)(resp), toErr(ctx, err) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go new file mode 100644 index 0000000000..81ae31fd8f --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/op.go @@ -0,0 +1,560 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + +type opType int + +const ( + // A default Op has opType 0, which is invalid. + tRange opType = iota + 1 + tPut + tDeleteRange + tTxn +) + +var noPrefixEnd = []byte{0} + +// Op represents an Operation that kv can execute. +type Op struct { + t opType + key []byte + end []byte + + // for range + limit int64 + sort *SortOption + serializable bool + keysOnly bool + countOnly bool + minModRev int64 + maxModRev int64 + minCreateRev int64 + maxCreateRev int64 + + // for range, watch + rev int64 + + // for watch, put, delete + prevKV bool + + // for watch + // fragmentation should be disabled by default + // if true, split watch events when total exceeds + // "--max-request-bytes" flag value + 512-byte + fragment bool + + // for put + ignoreValue bool + ignoreLease bool + + // progressNotify is for progress updates. + progressNotify bool + // createdNotify is for created event + createdNotify bool + // filters for watchers + filterPut bool + filterDelete bool + + // for put + val []byte + leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op +} + +// accessors / mutators + +// IsTxn returns true if the "Op" type is transaction. +func (op Op) IsTxn() bool { + return op.t == tTxn +} + +// Txn returns the comparison(if) operations, "then" operations, and "else" operations. +func (op Op) Txn() ([]Cmp, []Op, []Op) { + return op.cmps, op.thenOps, op.elseOps +} + +// KeyBytes returns the byte slice holding the Op's key. +func (op Op) KeyBytes() []byte { return op.key } + +// WithKeyBytes sets the byte slice for the Op's key. +func (op *Op) WithKeyBytes(key []byte) { op.key = key } + +// RangeBytes returns the byte slice holding with the Op's range end, if any. +func (op Op) RangeBytes() []byte { return op.end } + +// Rev returns the requested revision, if any. +func (op Op) Rev() int64 { return op.rev } + +// IsPut returns true iff the operation is a Put. +func (op Op) IsPut() bool { return op.t == tPut } + +// IsGet returns true iff the operation is a Get. +func (op Op) IsGet() bool { return op.t == tRange } + +// IsDelete returns true iff the operation is a Delete. +func (op Op) IsDelete() bool { return op.t == tDeleteRange } + +// IsSerializable returns true if the serializable field is true. +func (op Op) IsSerializable() bool { return op.serializable } + +// IsKeysOnly returns whether keysOnly is set. +func (op Op) IsKeysOnly() bool { return op.keysOnly } + +// IsCountOnly returns whether countOnly is set. +func (op Op) IsCountOnly() bool { return op.countOnly } + +// MinModRev returns the operation's minimum modify revision. 
+func (op Op) MinModRev() int64 { return op.minModRev } + +// MaxModRev returns the operation's maximum modify revision. +func (op Op) MaxModRev() int64 { return op.maxModRev } + +// MinCreateRev returns the operation's minimum create revision. +func (op Op) MinCreateRev() int64 { return op.minCreateRev } + +// MaxCreateRev returns the operation's maximum create revision. +func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } + +// WithRangeBytes sets the byte slice for the Op's range end. +func (op *Op) WithRangeBytes(end []byte) { op.end = end } + +// ValueBytes returns the byte slice holding the Op's value, if any. +func (op Op) ValueBytes() []byte { return op.val } + +// WithValueBytes sets the byte slice for the Op's value. +func (op *Op) WithValueBytes(v []byte) { op.val = v } + +func (op Op) toRangeRequest() *pb.RangeRequest { + if op.t != tRange { + panic("op.t != tRange") + } + r := &pb.RangeRequest{ + Key: op.key, + RangeEnd: op.end, + Limit: op.limit, + Revision: op.rev, + Serializable: op.serializable, + KeysOnly: op.keysOnly, + CountOnly: op.countOnly, + MinModRevision: op.minModRev, + MaxModRevision: op.maxModRev, + MinCreateRevision: op.minCreateRev, + MaxCreateRevision: op.maxCreateRev, + } + if op.sort != nil { + r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) + r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) + } + return r +} + +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + +func (op Op) toRequestOp() *pb.RequestOp { + switch op.t { + case tRange: + return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} + case tPut: + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} + case tDeleteRange: + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + case tTxn: + return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} + default: + panic("Unknown Op") + } +} + +func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } + return op.t != tRange +} + +// OpGet returns "get" operation based on given key and operation options. +func OpGet(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + return ret +} + +// OpDelete returns "delete" operation based on given key and operation options. 
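Ops built with OpGet, OpPut, and OpDelete can either be embedded in transactions or executed one at a time through KV.Do, reading the result back through the OpResponse accessors. An illustrative sketch; the key prefix and helper name are placeholders.

	package example

	import (
		"context"
		"fmt"

		"go.etcd.io/etcd/clientv3"
	)

	func doOps(ctx context.Context, cli *clientv3.Client) error {
		// Build the operation up front, execute it later via Do.
		getAll := clientv3.OpGet("config/", clientv3.WithPrefix())

		resp, err := cli.Do(ctx, getAll)
		if err != nil {
			return err
		}
		for _, kv := range resp.Get().Kvs {
			fmt.Printf("%s = %s\n", kv.Key, kv.Value)
		}

		// Delete the same keyspace with a delete op.
		_, err = cli.Do(ctx, clientv3.OpDelete("config/", clientv3.WithPrefix()))
		return err
	}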
+func OpDelete(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tDeleteRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in delete") + case ret.limit != 0: + panic("unexpected limit in delete") + case ret.rev != 0: + panic("unexpected revision in delete") + case ret.sort != nil: + panic("unexpected sort in delete") + case ret.serializable: + panic("unexpected serializable in delete") + case ret.countOnly: + panic("unexpected countOnly in delete") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in delete") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in delete") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in delete") + case ret.createdNotify: + panic("unexpected createdNotify in delete") + } + return ret +} + +// OpPut returns "put" operation based on given key-value and operation options. +func OpPut(key, val string, opts ...OpOption) Op { + ret := Op{t: tPut, key: []byte(key), val: []byte(val)} + ret.applyOpts(opts) + switch { + case ret.end != nil: + panic("unexpected range in put") + case ret.limit != 0: + panic("unexpected limit in put") + case ret.rev != 0: + panic("unexpected revision in put") + case ret.sort != nil: + panic("unexpected sort in put") + case ret.serializable: + panic("unexpected serializable in put") + case ret.countOnly: + panic("unexpected countOnly in put") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in put") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in put") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in put") + case ret.createdNotify: + panic("unexpected createdNotify in put") + } + return ret +} + +// OpTxn returns "txn" operation based on given transaction conditions. +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + +func opWatch(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in watch") + case ret.limit != 0: + panic("unexpected limit in watch") + case ret.sort != nil: + panic("unexpected sort in watch") + case ret.serializable: + panic("unexpected serializable in watch") + case ret.countOnly: + panic("unexpected countOnly in watch") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in watch") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in watch") + } + return ret +} + +func (op *Op) applyOpts(opts []OpOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpOption configures Operations like Get, Put, Delete. +type OpOption func(*Op) + +// WithLease attaches a lease ID to a key in 'Put' request. +func WithLease(leaseID LeaseID) OpOption { + return func(op *Op) { op.leaseID = leaseID } +} + +// WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. +func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } + +// WithRev specifies the store revision for 'Get' request. 
+// Or the start revision of 'Watch' request. +func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } + +// WithSort specifies the ordering in 'Get' request. It requires +// 'WithRange' and/or 'WithPrefix' to be specified too. +// 'target' specifies the target to sort by: key, version, revisions, value. +// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. +func WithSort(target SortTarget, order SortOrder) OpOption { + return func(op *Op) { + if target == SortByKey && order == SortAscend { + // If order != SortNone, server fetches the entire key-space, + // and then applies the sort and limit, if provided. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. + order = SortNone + } + op.sort = &SortOption{target, order} + } +} + +// GetPrefixRangeEnd gets the range end of the prefix. +// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. +func GetPrefixRangeEnd(prefix string) string { + return string(getPrefix([]byte(prefix))) +} + +func getPrefix(key []byte) []byte { + end := make([]byte, len(key)) + copy(end, key) + for i := len(end) - 1; i >= 0; i-- { + if end[i] < 0xff { + end[i] = end[i] + 1 + end = end[:i+1] + return end + } + } + // next prefix does not exist (e.g., 0xffff); + // default to WithFromKey policy + return noPrefixEnd +} + +// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate +// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' +// can return 'foo1', 'foo2', and so on. +func WithPrefix() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } + op.end = getPrefix(op.key) + } +} + +// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. +// For example, 'Get' requests with 'WithRange(end)' returns +// the keys in the range [key, end). +// endKey must be lexicographically greater than start key. +func WithRange(endKey string) OpOption { + return func(op *Op) { op.end = []byte(endKey) } +} + +// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests +// to be equal or greater than the key in the argument. +func WithFromKey() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key = []byte{0} + } + op.end = []byte("\x00") + } +} + +// WithSerializable makes 'Get' request serializable. By default, +// it's linearizable. Serializable requests are better for lower latency +// requirement. +func WithSerializable() OpOption { + return func(op *Op) { op.serializable = true } +} + +// WithKeysOnly makes the 'Get' request return only the keys and the corresponding +// values will be omitted. +func WithKeysOnly() OpOption { + return func(op *Op) { op.keysOnly = true } +} + +// WithCountOnly makes the 'Get' request return only the count of keys. +func WithCountOnly() OpOption { + return func(op *Op) { op.countOnly = true } +} + +// WithMinModRev filters out keys for Get with modification revisions less than the given revision. +func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } + +// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. +func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } + +// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. 
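The range options above compose on a single Get. An illustrative combination of prefix, ordering, limit, and a serializable (possibly stale, lower-latency) read; the key prefix, limit, and helper name are assumptions.

	package example

	import (
		"context"
		"fmt"

		"go.etcd.io/etcd/clientv3"
	)

	func newestJobs(ctx context.Context, cli *clientv3.Client) error {
		// Up to 10 keys under "jobs/", newest modification first.
		resp, err := cli.Get(ctx, "jobs/",
			clientv3.WithPrefix(),
			clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend),
			clientv3.WithLimit(10),
			clientv3.WithSerializable(),
		)
		if err != nil {
			return err
		}
		fmt.Println("total keys in range:", resp.Count)
		for _, kv := range resp.Kvs {
			fmt.Printf("%s (mod rev %d)\n", kv.Key, kv.ModRevision)
		}
		return nil
	}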
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } + +// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. +func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } + +// WithFirstCreate gets the key with the oldest creation revision in the request range. +func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } + +// WithLastCreate gets the key with the latest creation revision in the request range. +func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } + +// WithFirstKey gets the lexically first key in the request range. +func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } + +// WithLastKey gets the lexically last key in the request range. +func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) } + +// WithFirstRev gets the key with the oldest modification revision in the request range. +func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) } + +// WithLastRev gets the key with the latest modification revision in the request range. +func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) } + +// withTop gets the first key over the get's prefix given a sort order +func withTop(target SortTarget, order SortOrder) []OpOption { + return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} +} + +// WithProgressNotify makes watch server send periodic progress updates +// every 10 minutes when there is no incoming events. +// Progress updates have zero events in WatchResponse. +func WithProgressNotify() OpOption { + return func(op *Op) { + op.progressNotify = true + } +} + +// WithCreatedNotify makes watch server sends the created event. +func WithCreatedNotify() OpOption { + return func(op *Op) { + op.createdNotify = true + } +} + +// WithFilterPut discards PUT events from the watcher. +func WithFilterPut() OpOption { + return func(op *Op) { op.filterPut = true } +} + +// WithFilterDelete discards DELETE events from the watcher. +func WithFilterDelete() OpOption { + return func(op *Op) { op.filterDelete = true } +} + +// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, +// nothing will be returned. +func WithPrevKV() OpOption { + return func(op *Op) { + op.prevKV = true + } +} + +// WithFragment to receive raw watch response with fragmentation. +// Fragmentation is disabled by default. If fragmentation is enabled, +// etcd watch server will split watch response before sending to clients +// when the total size of watch events exceed server-side request limit. +// The default server-side request limit is 1.5 MiB, which can be configured +// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. +// See "etcdserver/api/v3rpc/watch.go" for more details. +func WithFragment() OpOption { + return func(op *Op) { op.fragment = true } +} + +// WithIgnoreValue updates the key using its current value. +// This option can not be combined with non-empty values. +// Returns an error if the key does not exist. +func WithIgnoreValue() OpOption { + return func(op *Op) { + op.ignoreValue = true + } +} + +// WithIgnoreLease updates the key using its current lease. +// This option can not be combined with WithLease. +// Returns an error if the key does not exist. 
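Illustration only: updating a key's value while leaving its existing lease attachment untouched with WithIgnoreLease (declared just below), and the mirror case of re-attaching a lease while keeping the current value with WithIgnoreValue. Key names and helper names are placeholders.

	package example

	import (
		"context"

		"go.etcd.io/etcd/clientv3"
	)

	func refreshValueKeepLease(ctx context.Context, cli *clientv3.Client) error {
		// Update the value; whatever lease is already attached stays attached.
		// Fails if the key does not exist.
		_, err := cli.Put(ctx, "service/instance-1", "v2", clientv3.WithIgnoreLease())
		return err
	}

	func rebindLeaseKeepValue(ctx context.Context, cli *clientv3.Client, id clientv3.LeaseID) error {
		// Reuse the key's current value; the value argument must stay empty
		// when WithIgnoreValue is used. Fails if the key does not exist.
		_, err := cli.Put(ctx, "service/instance-1", "", clientv3.WithIgnoreValue(), clientv3.WithLease(id))
		return err
	}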
+func WithIgnoreLease() OpOption { + return func(op *Op) { + op.ignoreLease = true + } +} + +// LeaseOp represents an Operation that lease can execute. +type LeaseOp struct { + id LeaseID + + // for TimeToLive + attachedKeys bool +} + +// LeaseOption configures lease operations. +type LeaseOption func(*LeaseOp) + +func (op *LeaseOp) applyOpts(opts []LeaseOption) { + for _, opt := range opts { + opt(op) + } +} + +// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. +func WithAttachedKeys() LeaseOption { + return func(op *LeaseOp) { op.attachedKeys = true } +} + +func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest { + ret := &LeaseOp{id: id} + ret.applyOpts(opts) + return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} +} + +// isWithPrefix returns true if WithPrefix is being called in the op +func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) } + +// isWithFromKey returns true if WithFromKey is being called in the op +func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) } diff --git a/vendor/go.etcd.io/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go new file mode 100644 index 0000000000..700714c086 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/options.go @@ -0,0 +1,65 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "math" + "time" + + "google.golang.org/grpc" +) + +var ( + // client-side handling retrying of request failures where data was not written to the wire or + // where server indicates it did not process the data. gRPC default is default is "FailFast(true)" + // but for etcd we default to "FailFast(false)" to minimize client request error responses due to + // transient failures. + defaultFailFast = grpc.FailFast(false) + + // client-side request send limit, gRPC default is math.MaxInt32 + // Make sure that "client-side send limit < server-side default send/recv limit" + // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes + defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) + + // client-side response receive limit, gRPC default is 4MB + // Make sure that "client-side receive limit >= server-side default send/recv limit" + // because range response can easily exceed request send limits + // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway + defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) + + // client-side non-streaming retry limit, only applied to requests where server responds with + // a error code clearly indicating it was unable to process the request such as codes.Unavailable. + // If set to 0, retry is disabled. 
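The send/receive message-size defaults described above can be overridden per client through clientv3.Config (defined in config.go, which is not part of this excerpt). A sketch with assumed endpoint and sizes, not a recommendation:

	package example

	import (
		"time"

		"go.etcd.io/etcd/clientv3"
	)

	func newClientWithLimits() (*clientv3.Client, error) {
		return clientv3.New(clientv3.Config{
			Endpoints:   []string{"localhost:2379"}, // assumed endpoint
			DialTimeout: 5 * time.Second,
			// Override the defaults declared in options.go; the numbers
			// here are purely illustrative.
			MaxCallSendMsgSize: 4 * 1024 * 1024,
			MaxCallRecvMsgSize: 16 * 1024 * 1024,
		})
	}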
+ defaultUnaryMaxRetries uint = 100 + + // client-side streaming retry limit, only applied to requests where server responds with + // a error code clearly indicating it was unable to process the request such as codes.Unavailable. + // If set to 0, retry is disabled. + defaultStreamMaxRetries = ^uint(0) // max uint + + // client-side retry backoff wait between requests. + defaultBackoffWaitBetween = 25 * time.Millisecond + + // client-side retry backoff default jitter fraction. + defaultBackoffJitterFraction = 0.10 +) + +// defaultCallOpts defines a list of default "gRPC.CallOption". +// Some options are exposed to "clientv3.Config". +// Defaults will be overridden by the settings in "clientv3.Config". +var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} + +// MaxLeaseTTL is the maximum lease TTL value +const MaxLeaseTTL = 9000000000 diff --git a/vendor/go.etcd.io/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go new file mode 100644 index 0000000000..7e855de066 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/retry.go @@ -0,0 +1,298 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type retryPolicy uint8 + +const ( + repeatable retryPolicy = iota + nonRepeatable +) + +func (rp retryPolicy) String() string { + switch rp { + case repeatable: + return "repeatable" + case nonRepeatable: + return "nonRepeatable" + default: + return "UNKNOWN" + } +} + +// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry. +// +// immutable requests (e.g. Get) should be retried unless it's +// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge). +// +// Returning "false" means retry should stop, since client cannot +// handle itself even with retries. +func isSafeRetryImmutableRPC(err error) bool { + eErr := rpctypes.Error(err) + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + // interrupted by non-transient server-side or gRPC-side error + // client cannot handle itself (e.g. rpctypes.ErrCompacted) + return false + } + // only retry if unavailable + ev, ok := status.FromError(err) + if !ok { + // all errors from RPC is typed "grpc/status.(*statusError)" + // (ref. https://github.com/grpc/grpc-go/pull/1782) + // + // if the error type is not "grpc/status.(*statusError)", + // it could be from "Dial" + // TODO: do not retry for now + // ref. https://github.com/grpc/grpc-go/issues/1581 + return false + } + return ev.Code() == codes.Unavailable +} + +// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry. +// +// mutable requests (e.g. 
Put, Delete, Txn) should only be retried +// when the status code is codes.Unavailable when initial connection +// has not been established (no endpoint is up). +// +// Returning "false" means retry should stop, otherwise it violates +// write-at-most-once semantics. +func isSafeRetryMutableRPC(err error) bool { + if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable { + // not safe for mutable RPCs + // e.g. interrupted by non-transient error that client cannot handle itself, + // or transient error while the connection has already been established + return false + } + desc := rpctypes.ErrorDesc(err) + return desc == "there is no address available" || desc == "there is no connection available" +} + +type retryKVClient struct { + kc pb.KVClient +} + +// RetryKVClient implements a KVClient. +func RetryKVClient(c *Client) pb.KVClient { + return &retryKVClient{ + kc: pb.NewKVClient(c.conn), + } +} +func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { + return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + return rkv.kc.Put(ctx, in, opts...) +} + +func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + return rkv.kc.DeleteRange(ctx, in, opts...) +} + +func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + return rkv.kc.Txn(ctx, in, opts...) +} + +func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + return rkv.kc.Compact(ctx, in, opts...) +} + +type retryLeaseClient struct { + lc pb.LeaseClient +} + +// RetryLeaseClient implements a LeaseClient. +func RetryLeaseClient(c *Client) pb.LeaseClient { + return &retryLeaseClient{ + lc: pb.NewLeaseClient(c.conn), + } +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { + return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { + return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { + return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...) +} + +type retryClusterClient struct { + cc pb.ClusterClient +} + +// RetryClusterClient implements a ClusterClient. 
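[Editor's note] The wrappers above encode the split described in the comments: read-only RPCs append withRetryPolicy(repeatable), while mutating RPCs (Put, DeleteRange, Txn, member/auth changes) pass options through untouched. A hypothetical helper, not part of this package, that mirrors the same decision:

	// shouldRetry mirrors isSafeRetryImmutableRPC/isSafeRetryMutableRPC in spirit:
	// only codes.Unavailable is ever retried, and a mutable RPC is retried only
	// when no connection could be established at all.
	func shouldRetry(err error, mutable bool) bool {
		ev, ok := status.FromError(err)
		if !ok || ev.Code() != codes.Unavailable {
			return false
		}
		if !mutable {
			return true
		}
		desc := rpctypes.ErrorDesc(err)
		return desc == "there is no address available" || desc == "there is no connection available"
	}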
+func RetryClusterClient(c *Client) pb.ClusterClient { + return &retryClusterClient{ + cc: pb.NewClusterClient(c.conn), + } +} + +func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + return rcc.cc.MemberAdd(ctx, in, opts...) +} + +func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + return rcc.cc.MemberRemove(ctx, in, opts...) +} + +func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + return rcc.cc.MemberUpdate(ctx, in, opts...) +} + +func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) { + return rcc.cc.MemberPromote(ctx, in, opts...) +} + +type retryMaintenanceClient struct { + mc pb.MaintenanceClient +} + +// RetryMaintenanceClient implements a Maintenance. +func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + return &retryMaintenanceClient{ + mc: pb.NewMaintenanceClient(conn), + } +} + +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { + return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { + return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + return rmc.mc.Defragment(ctx, in, opts...) +} + +type retryAuthClient struct { + ac pb.AuthClient +} + +// RetryAuthClient implements a AuthClient. 
+func RetryAuthClient(c *Client) pb.AuthClient { + return &retryAuthClient{ + ac: pb.NewAuthClient(c.conn), + } +} + +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + return rac.ac.AuthEnable(ctx, in, opts...) +} + +func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + return rac.ac.AuthDisable(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + return rac.ac.UserAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + return rac.ac.UserDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + return rac.ac.UserChangePassword(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + return rac.ac.UserGrantRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + return rac.ac.UserRevokeRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + return rac.ac.RoleAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + return rac.ac.RoleDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + return rac.ac.RoleGrantPermission(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + return rac.ac.RoleRevokePermission(ctx, in, opts...) 
+} + +func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + return rac.ac.Authenticate(ctx, in, opts...) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go new file mode 100644 index 0000000000..2c266e55be --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go @@ -0,0 +1,392 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more +// fine grained error checking required by write-at-most-once retry semantics of etcd. + +package clientv3 + +import ( + "context" + "io" + "sync" + "time" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// unaryClientInterceptor returns a new retrying unary client interceptor. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ctx = withVersion(ctx) + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return invoker(ctx, method, req, reply, cc, grpcOpts...) + } + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil { + return err + } + logger.Debug( + "retrying of unary invoker", + zap.String("target", cc.Target()), + zap.Uint("attempt", attempt), + ) + lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) + if lastErr == nil { + return nil + } + logger.Warn( + "retrying of unary invoker failed", + zap.String("target", cc.Target()), + zap.Uint("attempt", attempt), + zap.Error(lastErr), + ) + if isContextError(lastErr) { + if ctx.Err() != nil { + // its the context deadline or cancellation. + return lastErr + } + // its the callCtx deadline or cancellation, in which case try again. 
+ continue + } + if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken { + gterr := c.getToken(ctx) + if gterr != nil { + logger.Warn( + "retrying of unary invoker failed to fetch new auth token", + zap.String("target", cc.Target()), + zap.Error(gterr), + ) + return gterr // lastErr must be invalid auth token + } + continue + } + if !isSafeRetry(c.lg, lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ctx = withVersion(ctx) + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return streamer(ctx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()") + } + newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...) + if err != nil { + logger.Error("streamer failed to create ClientStream", zap.Error(err)) + return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable? + } + retryingStreamer := &serverStreamingRetryingStream{ + client: c, + ClientStream: newStreamer, + callOpts: callOpts, + ctx: ctx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } +} + +// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. 
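[Editor's note] For context, these interceptors are attached as gRPC dial options when the client connects; the actual dial code lives outside this file, so the following is only an in-package sketch with a hypothetical helper name:

	// dialRetryOpts shows how the retry interceptors above could be wired in,
	// using the retry limits and backoff defaults from options.go.
	func dialRetryOpts(c *Client) []grpc.DialOption {
		bf := withBackoff(backoffLinearWithJitter(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
		return []grpc.DialOption{
			grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), bf)),
			grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(defaultStreamMaxRetries), bf)),
		}
	}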
+type serverStreamingRetryingStream struct { + grpc.ClientStream + client *Client + bufferedSends []interface{} // single message that the client can sen + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was closed + ctx context.Context + callOpts *options + streamerCall func(ctx context.Context) (grpc.ClientStream, error) + mu sync.RWMutex +} + +func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { + s.mu.Lock() + s.ClientStream = clientStream + s.mu.Unlock() +} + +func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ClientStream +} + +func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { + s.mu.Lock() + s.bufferedSends = append(s.bufferedSends, m) + s.mu.Unlock() + return s.getStream().SendMsg(m) +} + +func (s *serverStreamingRetryingStream) CloseSend() error { + s.mu.Lock() + s.wasClosedSend = true + s.mu.Unlock() + return s.getStream().CloseSend() +} + +func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { + return s.getStream().Header() +} + +func (s *serverStreamingRetryingStream) Trailer() metadata.MD { + return s.getStream().Trailer() +} + +func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { + attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr // success or hard failure + } + + // We start off from attempt 1, because zeroth was already made on normal SendMsg(). + for attempt := uint(1); attempt < s.callOpts.max; attempt++ { + if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil { + return err + } + newStream, err := s.reestablishStreamAndResendBuffer(s.ctx) + if err != nil { + s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err)) + return err // TODO(mwitkow): Maybe dial and transport errors should be retriable? + } + s.setStream(newStream) + + s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr)) + attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr + } + } + return lastErr +} + +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { + s.mu.RLock() + wasGood := s.receivedGood + s.mu.RUnlock() + err := s.getStream().RecvMsg(m) + if err == nil || err == io.EOF { + s.mu.Lock() + s.receivedGood = true + s.mu.Unlock() + return false, err + } else if wasGood { + // previous RecvMsg in the stream succeeded, no retry logic should interfere + return false, err + } + if isContextError(err) { + if s.ctx.Err() != nil { + return false, err + } + // its the callCtx deadline or cancellation, in which case try again. 
+ return true, err + } + if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { + gterr := s.client.getToken(s.ctx) + if gterr != nil { + s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr)) + return false, err // return the original error for simplicity + } + return true, err + + } + return isSafeRetry(s.client.lg, err, s.callOpts), err +} + +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { + s.mu.RLock() + bufferedSends := s.bufferedSends + s.mu.RUnlock() + newStream, err := s.streamerCall(callCtx) + if err != nil { + return nil, err + } + for _, msg := range bufferedSends { + if err := newStream.SendMsg(msg); err != nil { + return nil, err + } + } + if err := newStream.CloseSend(); err != nil { + return nil, err + } + return newStream, nil +} + +func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error { + waitTime := time.Duration(0) + if attempt > 0 { + waitTime = callOpts.backoffFunc(attempt) + } + if waitTime > 0 { + timer := time.NewTimer(waitTime) + select { + case <-ctx.Done(): + timer.Stop() + return contextErrToGrpcErr(ctx.Err()) + case <-timer.C: + } + } + return nil +} + +// isSafeRetry returns "true", if request is safe for retry with the given error. +func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool { + if isContextError(err) { + return false + } + switch callOpts.retryPolicy { + case repeatable: + return isSafeRetryImmutableRPC(err) + case nonRepeatable: + return isSafeRetryMutableRPC(err) + default: + lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) + return false + } +} + +func isContextError(err error) bool { + return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled +} + +func contextErrToGrpcErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Errorf(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Errorf(codes.Canceled, err.Error()) + default: + return status.Errorf(codes.Unknown, err.Error()) + } +} + +var ( + defaultOptions = &options{ + retryPolicy: nonRepeatable, + max: 0, // disable + backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), + retryAuth: true, + } +) + +// backoffFunc denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. +type backoffFunc func(attempt uint) time.Duration + +// withRetryPolicy sets the retry policy of this call. +func withRetryPolicy(rp retryPolicy) retryOption { + return retryOption{applyFunc: func(o *options) { + o.retryPolicy = rp + }} +} + +// withMax sets the maximum number of retries on this call, or this interceptor. +func withMax(maxRetries uint) retryOption { + return retryOption{applyFunc: func(o *options) { + o.max = maxRetries + }} +} + +// WithBackoff sets the `BackoffFunc `used to control time between retries. 
+func withBackoff(bf backoffFunc) retryOption { + return retryOption{applyFunc: func(o *options) { + o.backoffFunc = bf + }} +} + +type options struct { + retryPolicy retryPolicy + max uint + backoffFunc backoffFunc + retryAuth bool +} + +// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor. +type retryOption struct { + grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic. + applyFunc func(opt *options) +} + +func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options { + if len(retryOptions) == 0 { + return opt + } + optCopy := &options{} + *optCopy = *opt + for _, f := range retryOptions { + f.applyFunc(optCopy) + } + return optCopy +} + +func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) { + for _, opt := range callOptions { + if co, ok := opt.(retryOption); ok { + retryOptions = append(retryOptions, co) + } else { + grpcOptions = append(grpcOptions, opt) + } + } + return grpcOptions, retryOptions +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// +// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. +func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc { + return func(attempt uint) time.Duration { + return jitterUp(waitBetween, jitterFraction) + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/sort.go b/vendor/go.etcd.io/etcd/clientv3/sort.go new file mode 100644 index 0000000000..2bb9d9a13b --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/sort.go @@ -0,0 +1,37 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +type SortTarget int +type SortOrder int + +const ( + SortNone SortOrder = iota + SortAscend + SortDescend +) + +const ( + SortByKey SortTarget = iota + SortByVersion + SortByCreateRevision + SortByModRevision + SortByValue +) + +type SortOption struct { + Target SortTarget + Order SortOrder +} diff --git a/vendor/go.etcd.io/etcd/clientv3/txn.go b/vendor/go.etcd.io/etcd/clientv3/txn.go new file mode 100644 index 0000000000..c19715da43 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/txn.go @@ -0,0 +1,151 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
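[Editor's note] With the defaults from options.go (25ms wait, 0.10 jitter), the linear policy ignores the attempt number and simply jitters the fixed wait, so every retry sleeps for a duration drawn roughly uniformly from [22.5ms, 27.5ms]. An in-package sketch (fmt import assumed, printed values illustrative):

	bf := backoffLinearWithJitter(defaultBackoffWaitBetween, defaultBackoffJitterFraction)
	for attempt := uint(1); attempt <= 3; attempt++ {
		fmt.Println(bf(attempt)) // each wait is ~25ms +/- 10%, e.g. 23.4ms, 26.8ms, 24.9ms
	}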
+ +package clientv3 + +import ( + "context" + "sync" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" +) + +// Txn is the interface that wraps mini-transactions. +// +// Txn(context.TODO()).If( +// Compare(Value(k1), ">", v1), +// Compare(Version(k1), "=", 2) +// ).Then( +// OpPut(k2,v2), OpPut(k3,v3) +// ).Else( +// OpPut(k4,v4), OpPut(k5,v5) +// ).Commit() +// +type Txn interface { + // If takes a list of comparison. If all comparisons passed in succeed, + // the operations passed into Then() will be executed. Or the operations + // passed into Else() will be executed. + If(cs ...Cmp) Txn + + // Then takes a list of operations. The Ops list will be executed, if the + // comparisons passed in If() succeed. + Then(ops ...Op) Txn + + // Else takes a list of operations. The Ops list will be executed, if the + // comparisons passed in If() fail. + Else(ops ...Op) Txn + + // Commit tries to commit the transaction. + Commit() (*TxnResponse, error) +} + +type txn struct { + kv *kv + ctx context.Context + + mu sync.Mutex + cif bool + cthen bool + celse bool + + isWrite bool + + cmps []*pb.Compare + + sus []*pb.RequestOp + fas []*pb.RequestOp + + callOpts []grpc.CallOption +} + +func (txn *txn) If(cs ...Cmp) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cif { + panic("cannot call If twice!") + } + + if txn.cthen { + panic("cannot call If after Then!") + } + + if txn.celse { + panic("cannot call If after Else!") + } + + txn.cif = true + + for i := range cs { + txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) + } + + return txn +} + +func (txn *txn) Then(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cthen { + panic("cannot call Then twice!") + } + if txn.celse { + panic("cannot call Then after Else!") + } + + txn.cthen = true + + for _, op := range ops { + txn.isWrite = txn.isWrite || op.isWrite() + txn.sus = append(txn.sus, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Else(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.celse { + panic("cannot call Else twice!") + } + + txn.celse = true + + for _, op := range ops { + txn.isWrite = txn.isWrite || op.isWrite() + txn.fas = append(txn.fas, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Commit() (*TxnResponse, error) { + txn.mu.Lock() + defer txn.mu.Unlock() + + r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} + + var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) + if err != nil { + return nil, toErr(txn.ctx, err) + } + return (*TxnResponse)(resp), nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/utils.go b/vendor/go.etcd.io/etcd/clientv3/utils.go new file mode 100644 index 0000000000..b998c41b90 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/utils.go @@ -0,0 +1,49 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
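[Editor's note] The doc comment on the Txn interface below sketches the call pattern; spelled out as a complete illustrative call against an assumed *clientv3.Client named cli and an existing ctx:

	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Value("k1"), ">", "v1"),
			clientv3.Compare(clientv3.Version("k1"), "=", 2)).
		Then(clientv3.OpPut("k2", "v2"), clientv3.OpPut("k3", "v3")).
		Else(clientv3.OpPut("k4", "v4"), clientv3.OpPut("k5", "v5")).
		Commit()
	if err != nil {
		// the transaction could not be committed (e.g. context canceled, request too large)
	} else if resp.Succeeded {
		// all If comparisons held, so the Then ops were applied
	}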
+ +package clientv3 + +import ( + "math/rand" + "reflect" + "runtime" + "strings" + "time" +) + +// jitterUp adds random jitter to the duration. +// +// This adds or subtracts time from the duration within a given jitter fraction. +// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) +// +// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils +func jitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// Check if the provided function is being called in the op options. +func isOpFuncCalled(op string, opts []OpOption) bool { + for _, opt := range opts { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Func { + if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil { + if strings.Contains(opFunc.Name(), op) { + return true + } + } + } + } + return false +} diff --git a/vendor/go.etcd.io/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go new file mode 100644 index 0000000000..66e16ad63f --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/watch.go @@ -0,0 +1,1035 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + EventTypeDelete = mvccpb.DELETE + EventTypePut = mvccpb.PUT + + closeSendErrTimeout = 250 * time.Millisecond +) + +type Event mvccpb.Event + +type WatchChan <-chan WatchResponse + +type Watcher interface { + // Watch watches on a key or prefix. The watched events will be returned + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. + // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed, + // and "WatchResponse" from this closed channel has zero events and nil "Err()". + // The context "ctx" MUST be canceled, as soon as watcher is no longer being used, + // to release the associated resources. + // + // If the context is "context.Background/TODO", returned "WatchChan" will + // not be closed and block until event is triggered, except when server + // returns a non-recoverable error (e.g. ErrCompacted). + // For example, when context passed with "WithRequireLeader" and the + // connected server has no leader (e.g. due to network partition), + // error "etcdserver: no leader" (ErrNoLeader) will be returned, + // and then "WatchChan" is closed with non-nil "Err()". 
+ // In order to prevent a watch stream being stuck in a partitioned node, + // make sure to wrap context with "WithRequireLeader". + // + // Otherwise, as long as the context has not been canceled or timed out, + // watch will retry on other recoverable errors forever until reconnected. + // + // TODO: explicitly set context error in the last "WatchResponse" message and close channel? + // Currently, client contexts are overwritten with "valCtx" that never closes. + // TODO(v3.4): configure watch retry policy, limit maximum retry number + // (see https://github.com/etcd-io/etcd/issues/8980) + Watch(ctx context.Context, key string, opts ...OpOption) WatchChan + + // RequestProgress requests a progress notify response be sent in all watch channels. + RequestProgress(ctx context.Context) error + + // Close closes the watcher and cancels all watch requests. + Close() error +} + +type WatchResponse struct { + Header pb.ResponseHeader + Events []*Event + + // CompactRevision is the minimum revision the watcher may receive. + CompactRevision int64 + + // Canceled is used to indicate watch failure. + // If the watch failed and the stream was about to close, before the channel is closed, + // the channel sends a final response that has Canceled set to true with a non-nil Err(). + Canceled bool + + // Created is used to indicate the creation of the watcher. + Created bool + + closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string +} + +// IsCreate returns true if the event tells that the key is newly created. +func (e *Event) IsCreate() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision +} + +// IsModify returns true if the event tells that a new value is put on existing key. +func (e *Event) IsModify() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision +} + +// Err is the error value if this WatchResponse holds an error. +func (wr *WatchResponse) Err() error { + switch { + case wr.closeErr != nil: + return v3rpc.Error(wr.closeErr) + case wr.CompactRevision != 0: + return v3rpc.ErrCompacted + case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) + } + return v3rpc.ErrFutureRev + } + return nil +} + +// IsProgressNotify returns true if the WatchResponse is progress notification. +func (wr *WatchResponse) IsProgressNotify() bool { + return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 +} + +// watcher implements the Watcher interface +type watcher struct { + remote pb.WatchClient + callOpts []grpc.CallOption + + // mu protects the grpc streams map + mu sync.RWMutex + + // streams holds all the active grpc streams keyed by ctx value. + streams map[string]*watchGrpcStream + lg *zap.Logger +} + +// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
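[Editor's note] Following the guidance in the Watcher doc comment (wrap the context with WithRequireLeader, cancel it when done, and treat a non-nil Err() as terminal), an illustrative consumer loop against an assumed *clientv3.Client named cli; imports context, log and clientv3 are assumed:

	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel() // always cancel so the watcher's resources are released
	for wresp := range cli.Watch(ctx, "foo", clientv3.WithPrefix()) {
		if err := wresp.Err(); err != nil {
			// e.g. a compaction error if the start revision was compacted,
			// or "etcdserver: no leader" when require-leader is violated
			log.Printf("watch ended: %v", err)
			break
		}
		for _, ev := range wresp.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}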
+type watchGrpcStream struct { + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption + + // ctx controls internal remote.Watch requests + ctx context.Context + // ctxKey is the key used when looking up this stream's context + ctxKey string + cancel context.CancelFunc + + // substreams holds all active watchers on this grpc stream + substreams map[int64]*watcherStream + // resuming holds all resuming watchers on this grpc stream + resuming []*watcherStream + + // reqc sends a watch request from Watch() to the main goroutine + reqc chan watchStreamRequest + // respc receives data from the watch client + respc chan *pb.WatchResponse + // donec closes to broadcast shutdown + donec chan struct{} + // errc transmits errors from grpc Recv to the watch stream reconnect logic + errc chan error + // closingc gets the watcherStream of closing watchers + closingc chan *watcherStream + // wg is Done when all substream goroutines have exited + wg sync.WaitGroup + + // resumec closes to signal that all substreams should begin resuming + resumec chan struct{} + // closeErr is the error that closed the watch stream + closeErr error + + lg *zap.Logger +} + +// watchStreamRequest is a union of the supported watch request operation types +type watchStreamRequest interface { + toPB() *pb.WatchRequest +} + +// watchRequest is issued by the subscriber to start a new watcher +type watchRequest struct { + ctx context.Context + key string + end string + rev int64 + + // send created notification event if this field is true + createdNotify bool + // progressNotify is for progress updates + progressNotify bool + // fragmentation should be disabled by default + // if true, split watch events when total exceeds + // "--max-request-bytes" flag value + 512-byte + fragment bool + + // filters is the list of events to filter out + filters []pb.WatchCreateRequest_FilterType + // get the previous key-value pair before the event happens + prevKV bool + // retc receives a chan WatchResponse once the watcher is established + retc chan chan WatchResponse +} + +// progressRequest is issued by the subscriber to request watch progress +type progressRequest struct { +} + +// watcherStream represents a registered watcher +type watcherStream struct { + // initReq is the request that initiated this request + initReq watchRequest + + // outc publishes watch responses to subscriber + outc chan WatchResponse + // recvc buffers watch responses before publishing + recvc chan *WatchResponse + // donec closes when the watcherStream goroutine stops. + donec chan struct{} + // closing is set to true when stream should be scheduled to shutdown. 
+ closing bool + // id is the registered watch id on the grpc stream + id int64 + + // buf holds all events received from etcd but not yet consumed by the client + buf []*WatchResponse +} + +func NewWatcher(c *Client) Watcher { + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) +} + +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ + remote: wc, + streams: make(map[string]*watchGrpcStream), + } + if c != nil { + w.callOpts = c.callOpts + w.lg = c.lg + } + return w +} + +// never closes +var valCtxCh = make(chan struct{}) +var zeroTime = time.Unix(0, 0) + +// ctx with only the values; never Done +type valCtx struct{ context.Context } + +func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } +func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } +func (vc *valCtx) Err() error { return nil } + +func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { + ctx, cancel := context.WithCancel(&valCtx{inctx}) + wgs := &watchGrpcStream{ + owner: w, + remote: w.remote, + callOpts: w.callOpts, + ctx: ctx, + ctxKey: streamKeyFromCtx(inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), + respc: make(chan *pb.WatchResponse), + reqc: make(chan watchStreamRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), + lg: w.lg, + } + go wgs.run() + return wgs +} + +// Watch posts a watch request to run() and waits for a new watcher channel +func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { + ow := opWatch(key, opts...) + + var filters []pb.WatchCreateRequest_FilterType + if ow.filterPut { + filters = append(filters, pb.WatchCreateRequest_NOPUT) + } + if ow.filterDelete { + filters = append(filters, pb.WatchCreateRequest_NODELETE) + } + + wr := &watchRequest{ + ctx: ctx, + createdNotify: ow.createdNotify, + key: string(ow.key), + end: string(ow.end), + rev: ow.rev, + progressNotify: ow.progressNotify, + fragment: ow.fragment, + filters: filters, + prevKV: ow.prevKV, + retc: make(chan chan WatchResponse, 1), + } + + ok := false + ctxKey := streamKeyFromCtx(ctx) + + // find or allocate appropriate grpc watch stream + w.mu.Lock() + if w.streams == nil { + // closed + w.mu.Unlock() + ch := make(chan WatchResponse) + close(ch) + return ch + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + // couldn't create channel; return closed channel + closeCh := make(chan WatchResponse, 1) + + // submit request + select { + case reqc <- wr: + ok = true + case <-wr.ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) + } + + // receive channel + if ok { + select { + case ret := <-wr.retc: + return ret + case <-ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) 
+ } + } + + close(closeCh) + return closeCh +} + +func (w *watcher) Close() (err error) { + w.mu.Lock() + streams := w.streams + w.streams = nil + w.mu.Unlock() + for _, wgs := range streams { + if werr := wgs.close(); werr != nil { + err = werr + } + } + // Consider context.Canceled as a successful close + if err == context.Canceled { + err = nil + } + return err +} + +// RequestProgress requests a progress notify response be sent in all watch channels. +func (w *watcher) RequestProgress(ctx context.Context) (err error) { + ctxKey := streamKeyFromCtx(ctx) + + w.mu.Lock() + if w.streams == nil { + w.mu.Unlock() + return fmt.Errorf("no stream found for context") + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + pr := &progressRequest{} + + select { + case reqc <- pr: + return nil + case <-ctx.Done(): + if err == nil { + return ctx.Err() + } + return err + case <-donec: + if wgs.closeErr != nil { + return wgs.closeErr + } + // retry; may have dropped stream from no ctxs + return w.RequestProgress(ctx) + } +} + +func (w *watchGrpcStream) close() (err error) { + w.cancel() + <-w.donec + select { + case err = <-w.errc: + default: + } + return toErr(w.ctx, err) +} + +func (w *watcher) closeStream(wgs *watchGrpcStream) { + w.mu.Lock() + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + delete(w.streams, wgs.ctxKey) + } + w.mu.Unlock() +} + +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { + // check watch ID for backward compatibility (<= v3.3) + if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") { + w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) + // failed; no channel + close(ws.recvc) + return + } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} + +func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): + } + close(ws.outc) +} + +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } + // close subscriber's channel + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr}) + } else if ws.outc != nil { + close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + } + } +} + +// run is the root of the goroutines for managing a watcher client +func (w *watchGrpcStream) run() { + var wc pb.Watch_WatchClient + var closeErr error + + // substreams marked to close but goroutine still running; needed for + // avoiding double-closing recvc on grpc stream teardown + closing := make(map[*watcherStream]struct{}) + + defer func() { + w.closeErr = closeErr + // shutdown substreams and resuming substreams + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + w.joinSubstreams() + for range closing { + w.closeSubstream(<-w.closingc) + } + w.wg.Wait() + w.owner.closeStream(w) + }() + + // start a stream with the etcd grpc server + if wc, 
closeErr = w.newWatchClient(); closeErr != nil { + return + } + + cancelSet := make(map[int64]struct{}) + + var cur *pb.WatchResponse + for { + select { + // Watch() requested + case req := <-w.reqc: + switch wreq := req.(type) { + case *watchRequest: + outc := make(chan WatchResponse, 1) + // TODO: pass custom watch ID? + ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbuffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + if err := wc.Send(ws.initReq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + case *progressRequest: + if err := wc.Send(wreq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + + // new events from the watch client + case pbresp := <-w.respc: + if cur == nil || pbresp.Created || pbresp.Canceled { + cur = pbresp + } else if cur != nil && cur.WatchId == pbresp.WatchId { + // merge new events + cur.Events = append(cur.Events, pbresp.Events...) + // update "Fragment" field; last response with "Fragment" == false + cur.Fragment = pbresp.Fragment + } + + switch { + case pbresp.Created: + // response to head of queue creation + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + + if ws := w.nextResume(); ws != nil { + if err := wc.Send(ws.initReq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + + // reset for next iteration + cur = nil + + case pbresp.Canceled && pbresp.CompactRevision == 0: + delete(cancelSet, pbresp.WatchId) + if ws, ok := w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc + close(ws.recvc) + closing[ws] = struct{}{} + } + + // reset for next iteration + cur = nil + + case cur.Fragment: + // watch response events are still fragmented + // continue to fetch next fragmented event arrival + continue + + default: + // dispatch to appropriate watch stream + ok := w.dispatchEvent(cur) + + // reset for next iteration + cur = nil + + if ok { + break + } + + // watch response on unexpected watch id; cancel id + if _, ok := cancelSet[pbresp.WatchId]; ok { + break + } + + cancelSet[pbresp.WatchId] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: pbresp.WatchId, + }, + } + req := &pb.WatchRequest{RequestUnion: cr} + if w.lg != nil { + w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId)) + } + if err := wc.Send(req); err != nil { + if w.lg != nil { + w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err)) + } + } + } + + // watch client failed on Recv; spawn another if possible + case err := <-w.errc: + if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + closeErr = err + return + } + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + if ws := w.nextResume(); ws != nil { + if err := wc.Send(ws.initReq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + cancelSet = make(map[int64]struct{}) + + case <-w.ctx.Done(): + return + + case 
ws := <-w.closingc: + if ws.id != -1 { + // client is closing an established watch; close it on the server proactively instead of waiting + // to close when the next message arrives + cancelSet[ws.id] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: ws.id, + }, + } + req := &pb.WatchRequest{RequestUnion: cr} + if w.lg != nil { + w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id)) + } + if err := wc.Send(req); err != nil { + if w.lg != nil { + w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err)) + } + } + } + w.closeSubstream(ws) + delete(closing, ws) + // no more watchers on this stream, shutdown + if len(w.substreams)+len(w.resuming) == 0 { + return + } + } + } +} + +// nextResume chooses the next resuming to register with the grpc stream. Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. +func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] + } + w.resuming = w.resuming[1:len(w.resuming)] + } + return nil +} + +// dispatchEvent sends a WatchResponse to the appropriate watcher stream +func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { + events := make([]*Event, len(pbresp.Events)) + for i, ev := range pbresp.Events { + events[i] = (*Event)(ev) + } + // TODO: return watch ID? + wr := &WatchResponse{ + Header: *pbresp.Header, + Events: events, + CompactRevision: pbresp.CompactRevision, + Created: pbresp.Created, + Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + + // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to + // indicate they should be broadcast. + if wr.IsProgressNotify() && pbresp.WatchId == -1 { + return w.broadcastResponse(wr) + } + + return w.unicastResponse(wr, pbresp.WatchId) + +} + +// broadcastResponse send a watch response to all watch substreams. +func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { + for _, ws := range w.substreams { + select { + case ws.recvc <- wr: + case <-ws.donec: + } + } + return true +} + +// unicastResponse sends a watch response to a specific watch substream. 
+func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { + ws, ok := w.substreams[watchId] + if !ok { + return false + } + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } + return true +} + +// serveWatchClient forwards messages from the grpc stream to run() +func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { + for { + resp, err := wc.Recv() + if err != nil { + select { + case w.errc <- err: + case <-w.donec: + } + return + } + select { + case w.respc <- resp: + case <-w.donec: + return + } + } +} + +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev + resuming := false + defer func() { + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws + } + w.wg.Done() + }() + + emptyWr := &WatchResponse{} + for { + curWr := emptyWr + outc := ws.outc + + if len(ws.buf) > 0 { + curWr = ws.buf[0] + } else { + outc = nil + } + select { + case outc <- *curWr: + if ws.buf[0].Err() != nil { + return + } + ws.buf[0] = nil + ws.buf = ws.buf[1:] + case wr, ok := <-ws.recvc: + if !ok { + // shutdown from closeSubstream + return + } + + if wr.Created { + if ws.initReq.retc != nil { + ws.initReq.retc <- ws.outc + // to prevent next write from taking the slot in buffered channel + // and posting duplicate create events + ws.initReq.retc = nil + + // send first creation event only if requested + if ws.initReq.createdNotify { + ws.outc <- *wr + } + // once the watch channel is returned, a current revision + // watch must resume at the store revision. This is necessary + // for the following case to work as expected: + // wch := m1.Watch("a") + // m2.Put("a", "b") + // <-wch + // If the revision is only bound on the first observed event, + // if wch is disconnected before the Put is issued, then reconnects + // after it is committed, it'll miss the Put. 
+ if ws.initReq.rev == 0 { + nextRev = wr.Header.Revision + } + } + } else { + // current progress of watch; <= store revision + nextRev = wr.Header.Revision + } + + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 + } + ws.initReq.rev = nextRev + + // created event is already sent above, + // watcher should not post duplicate events + if wr.Created { + continue + } + + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + case <-w.ctx.Done(): + return + case <-ws.initReq.ctx.Done(): + return + case <-resumec: + resuming = true + return + } + } + // lazily send cancel message if events on missing id +} + +func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { + // mark all substreams as resuming + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + // strip out nils, if any + var resuming []*watcherStream + for _, ws := range w.resuming { + if ws != nil { + resuming = append(resuming, ws) + } + } + w.resuming = resuming + w.substreams = make(map[int64]*watcherStream) + + // connect to grpc stream while accepting watcher cancelation + stopc := make(chan struct{}) + donec := w.waitCancelSubstreams(stopc) + wc, err := w.openWatchClient() + close(stopc) + <-donec + + // serve all non-closing streams, even if there's a client error + // so that the teardown path can shutdown the streams as expected. + for _, ws := range w.resuming { + if ws.closing { + continue + } + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + } + + if err != nil { + return nil, v3rpc.Error(err) + } + + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil +} + +func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { + var wg sync.WaitGroup + wg.Add(len(w.resuming)) + donec := make(chan struct{}) + for i := range w.resuming { + go func(ws *watcherStream) { + defer wg.Done() + if ws.closing { + if ws.initReq.ctx.Err() != nil && ws.outc != nil { + close(ws.outc) + ws.outc = nil + } + return + } + select { + case <-ws.initReq.ctx.Done(): + // closed ws will be removed from resuming + ws.closing = true + close(ws.outc) + ws.outc = nil + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() + case <-stopc: + } + }(w.resuming[i]) + } + go func() { + defer close(donec) + wg.Wait() + }() + return donec +} + +// joinSubstreams waits for all substream goroutines to complete. +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec + } + } +} + +var maxBackoff = 100 * time.Millisecond + +// openWatchClient retries opening a watch client until success or halt. 
+// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false +func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { + backoff := time.Millisecond + for { + select { + case <-w.ctx.Done(): + if err == nil { + return nil, w.ctx.Err() + } + return nil, err + default: + } + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { + break + } + if isHaltErr(w.ctx, err) { + return nil, v3rpc.Error(err) + } + if isUnavailableErr(w.ctx, err) { + // retry, but backoff + if backoff < maxBackoff { + // 25% backoff factor + backoff = backoff + backoff/4 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + time.Sleep(backoff) + } + } + return ws, nil +} + +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. +func (wr *watchRequest) toPB() *pb.WatchRequest { + req := &pb.WatchCreateRequest{ + StartRevision: wr.rev, + Key: []byte(wr.key), + RangeEnd: []byte(wr.end), + ProgressNotify: wr.progressNotify, + Filters: wr.filters, + PrevKv: wr.prevKV, + Fragment: wr.fragment, + } + cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +// toPB converts an internal progress request structure to its protobuf WatchRequest structure. +func (pr *progressRequest) toPB() *pb.WatchRequest { + req := &pb.WatchProgressRequest{} + cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go new file mode 100644 index 0000000000..f72c6a644f --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. +package rpctypes diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go new file mode 100644 index 0000000000..e6a281460d --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -0,0 +1,235 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// server-side error +var ( + ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() + ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() + ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() + ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() + ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() + ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() + ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() + ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() + ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() + + ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() + ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() + ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() + + ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() + ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() + ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() + ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() + ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() + ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() + ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() + ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() + + ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() + ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() + + ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err() + ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err() + ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err() + ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err() + ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() + ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() + ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() + ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err() + ErrGRPCAuthFailed = 
status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() + ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() + ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() + ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() + ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() + ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() + ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() + + ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() + ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() + ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err() + ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() + ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() + ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() + ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() + ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() + ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() + ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() + ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() + ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err() + + errStringToError = map[string]error{ + ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + + ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, + ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, + ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, + ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, + ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, + + ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, + ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge, + + ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, + ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, + ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, + ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, + ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, + ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, + ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, + ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, + + ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, + + ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, + ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, + ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, + ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, + 
ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, + ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, + ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, + ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty, + ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, + ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, + ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, + ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, + ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, + ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, + + ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, + ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged, + ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, + ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, + ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, + ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, + ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, + ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, + ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, + ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, + } +) + +// client-side error +var ( + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) + + ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) + ErrLeaseExist = Error(ErrGRPCLeaseExist) + ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge) + + ErrMemberExist = Error(ErrGRPCMemberExist) + ErrPeerURLExist = Error(ErrGRPCPeerURLExist) + ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) + ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) + ErrMemberNotFound = Error(ErrGRPCMemberNotFound) + ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner) + ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady) + ErrTooManyLearners = Error(ErrGRPCTooManyLearners) + + ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) + ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) + + ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) + ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) + ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) + ErrUserEmpty = Error(ErrGRPCUserEmpty) + ErrUserNotFound = Error(ErrGRPCUserNotFound) + ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) + ErrRoleNotFound = Error(ErrGRPCRoleNotFound) + ErrRoleEmpty = Error(ErrGRPCRoleEmpty) + ErrAuthFailed = Error(ErrGRPCAuthFailed) + ErrPermissionDenied = Error(ErrGRPCPermissionDenied) + ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) + ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) + ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) + ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) + + ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotLeader = Error(ErrGRPCNotLeader) + ErrLeaderChanged = Error(ErrGRPCLeaderChanged) + ErrNotCapable = Error(ErrGRPCNotCapable) + ErrStopped = Error(ErrGRPCStopped) + ErrTimeout = Error(ErrGRPCTimeout) + ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) + 
ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) + ErrUnhealthy = Error(ErrGRPCUnhealthy) + ErrCorrupt = Error(ErrGRPCCorrupt) + ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) +) + +// EtcdError defines gRPC server errors. +// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) +type EtcdError struct { + code codes.Code + desc string +} + +// Code returns grpc/codes.Code. +// TODO: define clientv3/codes.Code. +func (e EtcdError) Code() codes.Code { + return e.code +} + +func (e EtcdError) Error() string { + return e.desc +} + +func Error(err error) error { + if err == nil { + return nil + } + verr, ok := errStringToError[ErrorDesc(err)] + if !ok { // not gRPC error + return err + } + ev, ok := status.FromError(verr) + var desc string + if ok { + desc = ev.Message() + } else { + desc = verr.Error() + } + return EtcdError{code: ev.Code(), desc: desc} +} + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go new file mode 100644 index 0000000000..90b8b835b1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go @@ -0,0 +1,22 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + MetadataRequireLeaderKey = "hasleader" + MetadataHasLeader = "true" + + MetadataClientAPIVersionKey = "client-api-version" +) diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go new file mode 100644 index 0000000000..8f8ac60ff2 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go @@ -0,0 +1,20 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + TokenFieldNameGRPC = "token" + TokenFieldNameSwagger = "authorization" +) diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go new file mode 100644 index 0000000000..9e9b42ceac --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -0,0 +1,1041 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: etcdserver.proto + +/* + Package etcdserverpb is a generated protocol buffer package. 
+ + It is generated from these files: + etcdserver.proto + raft_internal.proto + rpc.proto + + It has these top-level messages: + Request + Metadata + RequestHeader + InternalRaftRequest + EmptyResponse + InternalAuthenticateRequest + ResponseHeader + RangeRequest + RangeResponse + PutRequest + PutResponse + DeleteRangeRequest + DeleteRangeResponse + RequestOp + ResponseOp + Compare + TxnRequest + TxnResponse + CompactionRequest + CompactionResponse + HashRequest + HashKVRequest + HashKVResponse + HashResponse + SnapshotRequest + SnapshotResponse + WatchRequest + WatchCreateRequest + WatchCancelRequest + WatchProgressRequest + WatchResponse + LeaseGrantRequest + LeaseGrantResponse + LeaseRevokeRequest + LeaseRevokeResponse + LeaseCheckpoint + LeaseCheckpointRequest + LeaseCheckpointResponse + LeaseKeepAliveRequest + LeaseKeepAliveResponse + LeaseTimeToLiveRequest + LeaseTimeToLiveResponse + LeaseLeasesRequest + LeaseStatus + LeaseLeasesResponse + Member + MemberAddRequest + MemberAddResponse + MemberRemoveRequest + MemberRemoveResponse + MemberUpdateRequest + MemberUpdateResponse + MemberListRequest + MemberListResponse + MemberPromoteRequest + MemberPromoteResponse + DefragmentRequest + DefragmentResponse + MoveLeaderRequest + MoveLeaderResponse + AlarmRequest + AlarmMember + AlarmResponse + StatusRequest + StatusResponse + AuthEnableRequest + AuthDisableRequest + AuthenticateRequest + AuthUserAddRequest + AuthUserGetRequest + AuthUserDeleteRequest + AuthUserChangePasswordRequest + AuthUserGrantRoleRequest + AuthUserRevokeRoleRequest + AuthRoleAddRequest + AuthRoleGetRequest + AuthUserListRequest + AuthRoleListRequest + AuthRoleDeleteRequest + AuthRoleGrantPermissionRequest + AuthRoleRevokePermissionRequest + AuthEnableResponse + AuthDisableResponse + AuthenticateResponse + AuthUserAddResponse + AuthUserGetResponse + AuthUserDeleteResponse + AuthUserChangePasswordResponse + AuthUserGrantRoleResponse + AuthUserRevokeRoleResponse + AuthRoleAddResponse + AuthRoleGetResponse + AuthRoleListResponse + AuthUserListResponse + AuthRoleDeleteResponse + AuthRoleGrantPermissionResponse + AuthRoleRevokePermissionResponse +*/ +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } + +type Metadata struct { + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } + +func init() { + proto.RegisterType((*Request)(nil), "etcdserverpb.Request") + proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") +} +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x12 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) + i += copy(dAtA[i:], m.Method) + dAtA[i] = 0x1a + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x22 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + dAtA[i] = 0x28 + i++ + if m.Dir { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x32 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) + i += copy(dAtA[i:], m.PrevValue) + dAtA[i] = 0x38 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) + if m.PrevExist != nil { + dAtA[i] = 0x40 + i++ + if *m.PrevExist { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + dAtA[i] = 0x48 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) + dAtA[i] = 0x50 + i++ + if m.Wait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) + dAtA[i] = 0x60 + i++ 
+ if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x68 + i++ + if m.Sorted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x70 + i++ + if m.Quorum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x78 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if m.Stream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.Refresh != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + if *m.Refresh { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Request) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.ID)) + l = len(m.Method) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Path) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Val) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 2 + l = len(m.PrevValue) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 1 + sovEtcdserver(uint64(m.PrevIndex)) + if m.PrevExist != nil { + n += 2 + } + n += 1 + sovEtcdserver(uint64(m.Expiration)) + n += 2 + n += 1 + sovEtcdserver(uint64(m.Since)) + n += 2 + n += 2 + n += 2 + n += 1 + sovEtcdserver(uint64(m.Time)) + n += 3 + if m.Refresh != nil { + n += 3 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.NodeID)) + n += 1 + sovEtcdserver(uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEtcdserver(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEtcdserver(x uint64) (n int) { + return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Method = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Dir = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) + } + m.PrevIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrevIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PrevExist = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) + } + m.Expiration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Expiration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Wait = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Sorted = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Quorum = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stream = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Refresh = &b + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + m.ClusterID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEtcdserver(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEtcdserver + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEtcdserver(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } + +var fileDescriptorEtcdserver = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 
0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto new file mode 100644 index 0000000000..25e0aca5d9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto @@ -0,0 +1,34 @@ +syntax = "proto2"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Request { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional string Method = 2 [(gogoproto.nullable) = false]; + optional string Path = 3 [(gogoproto.nullable) = false]; + optional string Val = 4 [(gogoproto.nullable) = false]; + optional bool Dir = 5 [(gogoproto.nullable) = false]; + optional string PrevValue = 6 [(gogoproto.nullable) = false]; + optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; + optional bool PrevExist = 8 [(gogoproto.nullable) = true]; + optional int64 Expiration = 9 [(gogoproto.nullable) = false]; + optional bool Wait = 10 [(gogoproto.nullable) = false]; + optional uint64 Since = 11 [(gogoproto.nullable) = false]; + optional bool Recursive = 12 [(gogoproto.nullable) = false]; + optional bool Sorted = 13 [(gogoproto.nullable) = false]; + optional bool Quorum = 14 [(gogoproto.nullable) = false]; + optional int64 Time = 15 [(gogoproto.nullable) = false]; + optional bool Stream = 16 [(gogoproto.nullable) = false]; + optional bool Refresh = 17 [(gogoproto.nullable) = true]; +} + +message Metadata { + optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; + optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go new file mode 100644 index 0000000000..b170499e4b --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -0,0 +1,2127 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft_internal.proto + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RequestHeader struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // username is a username that is associated with an auth token of gRPC connection + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. +type InternalRaftRequest struct { + Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` + LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` + LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint" json:"lease_checkpoint,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` + AuthRoleList 
*AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } + +type EmptyResponse struct { +} + +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. 
+type InternalAuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // simple_token is generated in API layer (etcdserver/v3_server.go) + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` +} + +func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } +func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*InternalAuthenticateRequest) ProtoMessage() {} +func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRaftInternal, []int{3} +} + +func init() { + proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") + proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") + proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") + proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") +} +func (m *RequestHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if len(m.Username) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + } + if m.AuthRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if m.V2 != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) + n1, err := m.V2.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Range != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) + n2, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) + n3, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.DeleteRange != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) + n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Txn != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) + n5, err := m.Txn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Compaction != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) + n6, err := m.Compaction.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.LeaseGrant != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaftInternal(dAtA, i, 
uint64(m.LeaseGrant.Size())) + n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.LeaseRevoke != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) + n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Alarm != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) + n9, err := m.Alarm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.LeaseCheckpoint != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseCheckpoint.Size())) + n10, err := m.LeaseCheckpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Header != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x6 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) + n11, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.AuthEnable != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x3e + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) + n12, err := m.AuthEnable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.AuthDisable != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) + n13, err := m.AuthDisable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.Authenticate != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) + n14, err := m.Authenticate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.AuthUserAdd != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) + n15, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.AuthUserDelete != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size())) + n16, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.AuthUserGet != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) + n17, err := m.AuthUserGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.AuthUserChangePassword != nil { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) + n18, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.AuthUserGrantRole != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) + n19, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.AuthUserRevokeRole != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) + n20, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.AuthUserList != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) + n21, err := m.AuthUserList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if 
m.AuthRoleList != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) + n22, err := m.AuthRoleList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.AuthRoleAdd != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) + n23, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.AuthRoleDelete != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) + n24, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.AuthRoleGet != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) + n25, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.AuthRoleGrantPermission != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) + n26, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.AuthRoleRevokePermission != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) + n27, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.SimpleToken) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) + i += copy(dAtA[i:], m.SimpleToken) + } + return i, nil +} + +func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RequestHeader) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRevision != 0 { + n += 1 + sovRaftInternal(uint64(m.AuthRevision)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + if m.V2 != nil { + l = m.V2.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Put != nil { + l = 
m.Put.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.DeleteRange != nil { + l = m.DeleteRange.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Txn != nil { + l = m.Txn.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Compaction != nil { + l = m.Compaction.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseGrant != nil { + l = m.LeaseGrant.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseRevoke != nil { + l = m.LeaseRevoke.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Alarm != nil { + l = m.Alarm.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseCheckpoint != nil { + l = m.LeaseCheckpoint.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Header != nil { + l = m.Header.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthEnable != nil { + l = m.AuthEnable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthDisable != nil { + l = m.AuthDisable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.Authenticate != nil { + l = m.Authenticate.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserAdd != nil { + l = m.AuthUserAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserDelete != nil { + l = m.AuthUserDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGet != nil { + l = m.AuthUserGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserChangePassword != nil { + l = m.AuthUserChangePassword.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGrantRole != nil { + l = m.AuthUserGrantRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserRevokeRole != nil { + l = m.AuthUserRevokeRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserList != nil { + l = m.AuthUserList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleList != nil { + l = m.AuthRoleList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleAdd != nil { + l = m.AuthRoleAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleDelete != nil { + l = m.AuthRoleDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGet != nil { + l = m.AuthRoleGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGrantPermission != nil { + l = m.AuthRoleGrantPermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleRevokePermission != nil { + l = m.AuthRoleRevokePermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func (m *EmptyResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *InternalAuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.SimpleToken) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func sovRaftInternal(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaftInternal(x uint64) (n int) { + return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) + } + m.AuthRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AuthRevision |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.V2 == nil { + m.V2 = &Request{} + } + if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &RangeRequest{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Put == nil { + m.Put = &PutRequest{} + } + if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteRange == nil { + m.DeleteRange = &DeleteRangeRequest{} + } + if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Txn == nil { + m.Txn = &TxnRequest{} + } + if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Compaction == nil { + 
m.Compaction = &CompactionRequest{} + } + if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseGrant == nil { + m.LeaseGrant = &LeaseGrantRequest{} + } + if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseRevoke == nil { + m.LeaseRevoke = &LeaseRevokeRequest{} + } + if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alarm == nil { + m.Alarm = &AlarmRequest{} + } + if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseCheckpoint == nil { + m.LeaseCheckpoint = &LeaseCheckpointRequest{} + } + if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := 
m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthEnable == nil { + m.AuthEnable = &AuthEnableRequest{} + } + if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1011: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthDisable == nil { + m.AuthDisable = &AuthDisableRequest{} + } + if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1012: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Authenticate == nil { + m.Authenticate = &InternalAuthenticateRequest{} + } + if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserAdd == nil { + m.AuthUserAdd = &AuthUserAddRequest{} + } + if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1101: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserDelete == nil { + m.AuthUserDelete = &AuthUserDeleteRequest{} + } + 
if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1102: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGet == nil { + m.AuthUserGet = &AuthUserGetRequest{} + } + if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1103: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserChangePassword == nil { + m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} + } + if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1104: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGrantRole == nil { + m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} + } + if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1105: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserRevokeRole == nil { + m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} + } + if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1106: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserList == nil { + m.AuthUserList = &AuthUserListRequest{} + } + if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1107: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleList == nil { + m.AuthRoleList = &AuthRoleListRequest{} + } + if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1200: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleAdd == nil { + m.AuthRoleAdd = &AuthRoleAddRequest{} + } + if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1201: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleDelete == nil { + m.AuthRoleDelete = &AuthRoleDeleteRequest{} + } + if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1202: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGet == nil { + m.AuthRoleGet = &AuthRoleGetRequest{} + } + if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1203: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGrantPermission == nil { + m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} + } + if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1204: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleRevokePermission == nil { + m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} + } + if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SimpleToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaftInternal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaftInternal + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaftInternal(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return 
iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } + +var fileDescriptorRaftInternal = []byte{ + // 867 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xcb, 0x52, 0x33, 0x45, + 0x14, 0xc7, 0x49, 0xb8, 0xa6, 0x13, 0x2e, 0x36, 0xa0, 0x6d, 0xa8, 0x8a, 0x10, 0xbc, 0xe0, 0x0d, + 0xad, 0xf0, 0x00, 0x1a, 0x13, 0x0a, 0xa8, 0xa2, 0x90, 0x9a, 0xc2, 0x2a, 0xab, 0x5c, 0x8c, 0xcd, + 0xcc, 0x21, 0x19, 0x99, 0xcc, 0x8c, 0x3d, 0x9d, 0x88, 0x6f, 0xe2, 0x63, 0x78, 0xdb, 0xbb, 0x65, + 0xe1, 0x05, 0xf5, 0x05, 0x14, 0x37, 0xee, 0xbf, 0xef, 0x01, 0xbe, 0xea, 0xcb, 0xf4, 0x64, 0x92, + 0x0e, 0xbb, 0xc9, 0x39, 0xff, 0xf3, 0xfb, 0x9f, 0x99, 0x3e, 0x07, 0x1a, 0x6d, 0x32, 0x7a, 0xc3, + 0xdd, 0x20, 0xe2, 0xc0, 0x22, 0x1a, 0x1e, 0x26, 0x2c, 0xe6, 0x31, 0xae, 0x01, 0xf7, 0xfc, 0x14, + 0xd8, 0x08, 0x58, 0x72, 0x5d, 0xdf, 0xea, 0xc5, 0xbd, 0x58, 0x26, 0x3e, 0x10, 0x4f, 0x4a, 0x53, + 0xdf, 0xc8, 0x35, 0x3a, 0x52, 0x61, 0x89, 0xa7, 0x1e, 0x9b, 0x5f, 0xa2, 0x55, 0x07, 0xbe, 0x1e, + 0x42, 0xca, 0x4f, 0x81, 0xfa, 0xc0, 0xf0, 0x1a, 0x2a, 0x9f, 0x75, 0x49, 0x69, 0xb7, 0x74, 0xb0, + 0xe0, 0x94, 0xcf, 0xba, 0xb8, 0x8e, 0x56, 0x86, 0xa9, 0xb0, 0x1c, 0x00, 0x29, 0xef, 0x96, 0x0e, + 0x2a, 0x8e, 0xf9, 0x8d, 0xf7, 0xd1, 0x2a, 0x1d, 0xf2, 0xbe, 0xcb, 0x60, 0x14, 0xa4, 0x41, 0x1c, + 0x91, 0x79, 0x59, 0x56, 0x13, 0x41, 0x47, 0xc7, 0x9a, 0xbf, 0xac, 0xa3, 0xcd, 0x33, 0xdd, 0xb5, + 0x43, 0x6f, 0xb8, 0xb6, 0x9b, 0x32, 0x7a, 0x03, 0x95, 0x47, 0x2d, 0x69, 0x51, 0x6d, 0x6d, 0x1f, + 0x8e, 0xbf, 0xd7, 0xa1, 0x2e, 0x71, 0xca, 0xa3, 0x16, 0xfe, 0x10, 0x2d, 0x32, 0x1a, 0xf5, 0x40, + 0x7a, 0x55, 0x5b, 0xf5, 0x09, 0xa5, 0x48, 0x65, 0x72, 0x25, 0xc4, 0xef, 0xa0, 0xf9, 0x64, 0xc8, + 0xc9, 0x82, 0xd4, 0x93, 0xa2, 0xfe, 0x72, 0x98, 0xf5, 0xe3, 0x08, 0x11, 0xee, 0xa0, 0x9a, 0x0f, + 0x21, 0x70, 0x70, 0x95, 0xc9, 0xa2, 0x2c, 0xda, 0x2d, 0x16, 0x75, 0xa5, 0xa2, 0x60, 0x55, 0xf5, + 0xf3, 0x98, 0x30, 0xe4, 0x77, 0x11, 0x59, 0xb2, 0x19, 0x5e, 0xdd, 0x45, 0xc6, 0x90, 0xdf, 0x45, + 0xf8, 0x23, 0x84, 0xbc, 0x78, 0x90, 0x50, 0x8f, 0x8b, 0xef, 0xb7, 0x2c, 0x4b, 0x5e, 0x2b, 0x96, + 0x74, 0x4c, 0x3e, 0xab, 0x1c, 0x2b, 0xc1, 0x1f, 0xa3, 0x6a, 0x08, 0x34, 0x05, 0xb7, 0xc7, 0x68, + 0xc4, 0xc9, 0x8a, 0x8d, 0x70, 0x2e, 0x04, 0x27, 0x22, 0x6f, 0x08, 0xa1, 0x09, 0x89, 0x77, 0x56, + 0x04, 0x06, 0xa3, 0xf8, 0x16, 0x48, 0xc5, 0xf6, 0xce, 0x12, 0xe1, 0x48, 0x81, 0x79, 0xe7, 0x30, + 0x8f, 0x89, 0x63, 0xa1, 0x21, 0x65, 0x03, 0x82, 0x6c, 0xc7, 0xd2, 0x16, 0x29, 0x73, 0x2c, 0x52, + 0x88, 0x3f, 0x45, 0x1b, 0xca, 0xd6, 0xeb, 0x83, 0x77, 0x9b, 0xc4, 0x41, 0xc4, 0x49, 0x55, 0x16, + 0xbf, 0x6e, 0xb1, 0xee, 0x18, 0x51, 0x86, 0x59, 0x0f, 0x8b, 0x71, 0x7c, 0x84, 0x96, 0xfa, 0x72, + 0x86, 0x89, 0x2f, 0x31, 0x3b, 0xd6, 0x21, 0x52, 0x63, 0xee, 0x68, 0x29, 0x6e, 0xa3, 0xaa, 0x1c, + 0x61, 0x88, 0xe8, 0x75, 0x08, 0xe4, 0x7f, 0xeb, 0x09, 0xb4, 0x87, 0xbc, 0x7f, 0x2c, 0x05, 0xe6, + 0xfb, 0x51, 0x13, 0xc2, 0x5d, 0x24, 0x07, 0xde, 0xf5, 0x83, 0x54, 0x32, 0x9e, 0x2d, 0xdb, 0x3e, + 0xa0, 0x60, 0x74, 0x95, 0xc2, 0x7c, 0x40, 0x9a, 0xc7, 0xf0, 0x85, 0xa2, 0x40, 0xc4, 0x03, 0x8f, + 0x72, 0x20, 0xcf, 0x15, 0xe5, 0xed, 0x22, 0x25, 0x5b, 0xa4, 0xf6, 0x98, 0x34, 0xc3, 0x15, 0xea, + 0xf1, 0xb1, 0xde, 0x4d, 0xb1, 0xac, 
0x2e, 0xf5, 0x7d, 0xf2, 0xeb, 0xca, 0xac, 0xb6, 0x3e, 0x4b, + 0x81, 0xb5, 0x7d, 0xbf, 0xd0, 0x96, 0x8e, 0xe1, 0x0b, 0xb4, 0x91, 0x63, 0xd4, 0x90, 0x93, 0xdf, + 0x14, 0x69, 0xdf, 0x4e, 0xd2, 0xdb, 0xa1, 0x61, 0x6b, 0xb4, 0x10, 0x2e, 0xb6, 0xd5, 0x03, 0x4e, + 0x7e, 0x7f, 0xb2, 0xad, 0x13, 0xe0, 0x53, 0x6d, 0x9d, 0x00, 0xc7, 0x3d, 0xf4, 0x6a, 0x8e, 0xf1, + 0xfa, 0x62, 0xed, 0xdc, 0x84, 0xa6, 0xe9, 0x37, 0x31, 0xf3, 0xc9, 0x1f, 0x0a, 0xf9, 0xae, 0x1d, + 0xd9, 0x91, 0xea, 0x4b, 0x2d, 0xce, 0xe8, 0x2f, 0x53, 0x6b, 0x1a, 0x7f, 0x8e, 0xb6, 0xc6, 0xfa, + 0x15, 0xfb, 0xe2, 0xb2, 0x38, 0x04, 0xf2, 0xa0, 0x3c, 0xde, 0x9c, 0xd1, 0xb6, 0xdc, 0xb5, 0x38, + 0x3f, 0xea, 0x97, 0xe8, 0x64, 0x06, 0x7f, 0x81, 0xb6, 0x73, 0xb2, 0x5a, 0x3d, 0x85, 0xfe, 0x53, + 0xa1, 0xdf, 0xb2, 0xa3, 0xf5, 0x0e, 0x8e, 0xb1, 0x31, 0x9d, 0x4a, 0xe1, 0x53, 0xb4, 0x96, 0xc3, + 0xc3, 0x20, 0xe5, 0xe4, 0x2f, 0x45, 0xdd, 0xb3, 0x53, 0xcf, 0x83, 0x94, 0x17, 0xe6, 0x28, 0x0b, + 0x1a, 0x92, 0x68, 0x4d, 0x91, 0xfe, 0x9e, 0x49, 0x12, 0xd6, 0x53, 0xa4, 0x2c, 0x68, 0x8e, 0x5e, + 0x92, 0xc4, 0x44, 0x7e, 0x5f, 0x99, 0x75, 0xf4, 0xa2, 0x66, 0x72, 0x22, 0x75, 0xcc, 0x4c, 0xa4, + 0xc4, 0xe8, 0x89, 0xfc, 0xa1, 0x32, 0x6b, 0x22, 0x45, 0x95, 0x65, 0x22, 0xf3, 0x70, 0xb1, 0x2d, + 0x31, 0x91, 0x3f, 0x3e, 0xd9, 0xd6, 0xe4, 0x44, 0xea, 0x18, 0xfe, 0x0a, 0xd5, 0xc7, 0x30, 0x72, + 0x50, 0x12, 0x60, 0x83, 0x20, 0x95, 0xff, 0x18, 0x7f, 0x52, 0xcc, 0xf7, 0x66, 0x30, 0x85, 0xfc, + 0xd2, 0xa8, 0x33, 0xfe, 0x2b, 0xd4, 0x9e, 0xc7, 0x03, 0xb4, 0x93, 0x7b, 0xe9, 0xd1, 0x19, 0x33, + 0xfb, 0x59, 0x99, 0xbd, 0x6f, 0x37, 0x53, 0x53, 0x32, 0xed, 0x46, 0xe8, 0x0c, 0x41, 0x73, 0x1d, + 0xad, 0x1e, 0x0f, 0x12, 0xfe, 0xad, 0x03, 0x69, 0x12, 0x47, 0x29, 0x34, 0x13, 0xb4, 0xf3, 0xc4, + 0x1f, 0x22, 0x8c, 0xd1, 0x82, 0xbc, 0x2e, 0x94, 0xe4, 0x75, 0x41, 0x3e, 0x8b, 0x6b, 0x84, 0xd9, + 0x4f, 0x7d, 0x8d, 0xc8, 0x7e, 0xe3, 0x3d, 0x54, 0x4b, 0x83, 0x41, 0x12, 0x82, 0xcb, 0xe3, 0x5b, + 0x50, 0xb7, 0x88, 0x8a, 0x53, 0x55, 0xb1, 0x2b, 0x11, 0xfa, 0x64, 0xeb, 0xfe, 0xdf, 0xc6, 0xdc, + 0xfd, 0x63, 0xa3, 0xf4, 0xf0, 0xd8, 0x28, 0xfd, 0xf3, 0xd8, 0x28, 0x7d, 0xf7, 0x5f, 0x63, 0xee, + 0x7a, 0x49, 0xde, 0x61, 0x8e, 0x5e, 0x04, 0x00, 0x00, 0xff, 0xff, 0xed, 0x36, 0xf0, 0x6f, 0x1b, + 0x09, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto new file mode 100644 index 0000000000..7111f4572b --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcdserver.proto"; +import "rpc.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message RequestHeader { + uint64 ID = 1; + // username is a username that is associated with an auth token of gRPC connection + string username = 2; + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + uint64 auth_revision = 3; +} + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. 
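The hand-written tag bytes in the generated MarshalTo code above (for example 0xa2, 0x4b before AuthRoleRevokePermission) are simply the varint encoding of fieldNumber<<3 | wireType, the same scheme implemented by encodeVarintRaftInternal and sovRaftInternal. A small standalone sketch, not part of the vendored file, that reproduces those two bytes for field 1204 of the message defined next:

    package main

    import "fmt"

    // appendVarint mirrors the generated encodeVarintRaftInternal helper: it
    // emits v as protobuf base-128 varint bytes, least-significant 7 bits first.
    func appendVarint(b []byte, v uint64) []byte {
        for v >= 1<<7 {
            b = append(b, byte(v&0x7f|0x80))
            v >>= 7
        }
        return append(b, byte(v))
    }

    func main() {
        // A protobuf key is fieldNumber<<3 | wireType. Field 1204
        // (auth_role_revoke_permission) with wire type 2 (length-delimited)
        // encodes to 0xa2 0x4b, the literal bytes written by MarshalTo above.
        const fieldNum, wireType = 1204, 2
        fmt.Printf("% x\n", appendVarint(nil, uint64(fieldNum<<3|wireType))) // a2 4b
    }

The Unmarshal functions above walk the same encoding in reverse: they read a varint key, split it into fieldNum (wire >> 3) and wireType (wire & 0x7), and dispatch on the field number.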
+message InternalRaftRequest { + RequestHeader header = 100; + uint64 ID = 1; + + Request v2 = 2; + + RangeRequest range = 3; + PutRequest put = 4; + DeleteRangeRequest delete_range = 5; + TxnRequest txn = 6; + CompactionRequest compaction = 7; + + LeaseGrantRequest lease_grant = 8; + LeaseRevokeRequest lease_revoke = 9; + + AlarmRequest alarm = 10; + + LeaseCheckpointRequest lease_checkpoint = 11; + + AuthEnableRequest auth_enable = 1000; + AuthDisableRequest auth_disable = 1011; + + InternalAuthenticateRequest authenticate = 1012; + + AuthUserAddRequest auth_user_add = 1100; + AuthUserDeleteRequest auth_user_delete = 1101; + AuthUserGetRequest auth_user_get = 1102; + AuthUserChangePasswordRequest auth_user_change_password = 1103; + AuthUserGrantRoleRequest auth_user_grant_role = 1104; + AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; + AuthUserListRequest auth_user_list = 1106; + AuthRoleListRequest auth_role_list = 1107; + + AuthRoleAddRequest auth_role_add = 1200; + AuthRoleDeleteRequest auth_role_delete = 1201; + AuthRoleGetRequest auth_role_get = 1202; + AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; + AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; +} + +message EmptyResponse { +} + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. +message InternalAuthenticateRequest { + string name = 1; + string password = 2; + + // simple_token is generated in API layer (etcdserver/v3_server.go) + string simple_token = 3; +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go new file mode 100644 index 0000000000..31e121ee0a --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go @@ -0,0 +1,183 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserverpb + +import ( + "fmt" + "strings" + + proto "github.com/golang/protobuf/proto" +) + +// InternalRaftStringer implements custom proto Stringer: +// redact password, replace value fields with value_size fields. 
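As the comment above notes, the wrapper type defined next lets the server log an applied request without leaking credentials or large values. A hypothetical usage sketch, not part of this patch, assuming the vendored package is importable under the path shown in the diff header (go.etcd.io/etcd/etcdserver/etcdserverpb):

    package main

    import (
        "log"

        pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
    )

    func main() {
        req := &pb.InternalRaftRequest{
            ID:     1,
            Header: &pb.RequestHeader{ID: 1, Username: "root"},
            Authenticate: &pb.InternalAuthenticateRequest{
                Name:     "root",
                Password: "do-not-log-me",
            },
        }
        // The stringer matches on the populated union member and prints the
        // name and simple_token fields, but never the password.
        log.Printf("applying %s", &pb.InternalRaftStringer{Request: req})
    }
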
+type InternalRaftStringer struct {
+    Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+    switch {
+    case as.Request.LeaseGrant != nil:
+        return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+            as.Request.Header.String(),
+            as.Request.LeaseGrant.TTL,
+            as.Request.LeaseGrant.ID,
+        )
+    case as.Request.LeaseRevoke != nil:
+        return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+            as.Request.Header.String(),
+            as.Request.LeaseRevoke.ID,
+        )
+    case as.Request.Authenticate != nil:
+        return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+            as.Request.Header.String(),
+            as.Request.Authenticate.Name,
+            as.Request.Authenticate.SimpleToken,
+        )
+    case as.Request.AuthUserAdd != nil:
+        return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+            as.Request.Header.String(),
+            as.Request.AuthUserAdd.Name,
+        )
+    case as.Request.AuthUserChangePassword != nil:
+        return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+            as.Request.Header.String(),
+            as.Request.AuthUserChangePassword.Name,
+        )
+    case as.Request.Put != nil:
+        return fmt.Sprintf("header:<%s> put:<%s>",
+            as.Request.Header.String(),
+            NewLoggablePutRequest(as.Request.Put).String(),
+        )
+    case as.Request.Txn != nil:
+        return fmt.Sprintf("header:<%s> txn:<%s>",
+            as.Request.Header.String(),
+            NewLoggableTxnRequest(as.Request.Txn).String(),
+        )
+    default:
+        // nothing to redact
+    }
+    return as.Request.String()
+}
+
+// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
+// fields in any nested txn and put operations.
+type txnRequestStringer struct {
+    Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+    return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+    var compare []string
+    for _, c := range as.Request.Compare {
+        switch cv := c.TargetUnion.(type) {
+        case *Compare_Value:
+            compare = append(compare, newLoggableValueCompare(c, cv).String())
+        default:
+            // nothing to redact
+            compare = append(compare, c.String())
+        }
+    }
+    var success []string
+    for _, s := range as.Request.Success {
+        success = append(success, newLoggableRequestOp(s).String())
+    }
+    var failure []string
+    for _, f := range as.Request.Failure {
+        failure = append(failure, newLoggableRequestOp(f).String())
+    }
+    return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+        strings.Join(compare, " "),
+        strings.Join(success, " "),
+        strings.Join(failure, " "),
+    )
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+    Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+    return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+    switch op := as.Op.Request.(type) {
+    case *RequestOp_RequestPut:
+        return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+    case *RequestOp_RequestTxn:
+        return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+    default:
+        // nothing to redact
+    }
+    return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
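The same "faked out proto type" trick appears twice below: once for Compare values and once for PutRequest values. A hypothetical usage sketch of the exported put variant, again assuming the vendored package path; the exact text output is illustrative only:

    package main

    import (
        "fmt"

        pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
    )

    func main() {
        put := &pb.PutRequest{Key: []byte("config"), Value: make([]byte, 1<<20)}
        // The loggable wrapper keeps the key, lease and flag fields (same
        // protobuf struct tags) but swaps the raw value for its length, so a
        // 1 MiB value shows up as value_size:1048576 instead of a byte dump.
        fmt.Println(pb.NewLoggablePutRequest(put).String())
    }
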
+type loggableValueCompare struct { + Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"` + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3"` + ValueSize int64 `protobuf:"varint,7,opt,name=value_size,proto3"` + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"` +} + +func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare { + return &loggableValueCompare{ + c.Result, + c.Target, + c.Key, + int64(len(cv.Value)), + c.RangeEnd, + } +} + +func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} } +func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) } +func (*loggableValueCompare) ProtoMessage() {} + +// loggablePutRequest implements a custom proto String to replace value bytes field with a value +// size field. +// To preserve proto encoding of the key bytes, a faked out proto type is used here. +type loggablePutRequest struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3"` + ValueSize int64 `protobuf:"varint,2,opt,name=value_size,proto3"` + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"` + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"` + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"` + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"` +} + +func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest { + return &loggablePutRequest{ + request.Key, + int64(len(request.Value)), + request.Lease, + request.PrevKv, + request.IgnoreValue, + request.IgnoreLease, + } +} + +func (m *loggablePutRequest) Reset() { *m = loggablePutRequest{} } +func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) } +func (*loggablePutRequest) ProtoMessage() {} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go new file mode 100644 index 0000000000..6cbccc797c --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -0,0 +1,20088 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: rpc.proto + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" + + authpb "go.etcd.io/etcd/auth/authpb" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
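The blank-identifier assignments that follow are the usual trick generated files use so that unconditionally emitted imports never trip Go's unused-import check. A minimal sketch of the same idiom, written as an assumed stand-alone package rather than code from this patch:

    // Package example is an assumed stand-alone illustration, not code from
    // this patch.
    package example

    import (
        "fmt"
        "math"
    )

    // Assigning the imported identifiers to the blank identifier counts as a
    // use, so the compiler accepts imports that the rest of the generated
    // file may not reference directly.
    var _ = fmt.Errorf
    var _ = math.Inf
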
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AlarmType int32 + +const ( + AlarmType_NONE AlarmType = 0 + AlarmType_NOSPACE AlarmType = 1 + AlarmType_CORRUPT AlarmType = 2 +) + +var AlarmType_name = map[int32]string{ + 0: "NONE", + 1: "NOSPACE", + 2: "CORRUPT", +} +var AlarmType_value = map[string]int32{ + "NONE": 0, + "NOSPACE": 1, + "CORRUPT": 2, +} + +func (x AlarmType) String() string { + return proto.EnumName(AlarmType_name, int32(x)) +} +func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +type RangeRequest_SortOrder int32 + +const ( + RangeRequest_NONE RangeRequest_SortOrder = 0 + RangeRequest_ASCEND RangeRequest_SortOrder = 1 + RangeRequest_DESCEND RangeRequest_SortOrder = 2 +) + +var RangeRequest_SortOrder_name = map[int32]string{ + 0: "NONE", + 1: "ASCEND", + 2: "DESCEND", +} +var RangeRequest_SortOrder_value = map[string]int32{ + "NONE": 0, + "ASCEND": 1, + "DESCEND": 2, +} + +func (x RangeRequest_SortOrder) String() string { + return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) +} +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } + +type RangeRequest_SortTarget int32 + +const ( + RangeRequest_KEY RangeRequest_SortTarget = 0 + RangeRequest_VERSION RangeRequest_SortTarget = 1 + RangeRequest_CREATE RangeRequest_SortTarget = 2 + RangeRequest_MOD RangeRequest_SortTarget = 3 + RangeRequest_VALUE RangeRequest_SortTarget = 4 +) + +var RangeRequest_SortTarget_name = map[int32]string{ + 0: "KEY", + 1: "VERSION", + 2: "CREATE", + 3: "MOD", + 4: "VALUE", +} +var RangeRequest_SortTarget_value = map[string]int32{ + "KEY": 0, + "VERSION": 1, + "CREATE": 2, + "MOD": 3, + "VALUE": 4, +} + +func (x RangeRequest_SortTarget) String() string { + return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) +} +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{1, 1} +} + +type Compare_CompareResult int32 + +const ( + Compare_EQUAL Compare_CompareResult = 0 + Compare_GREATER Compare_CompareResult = 1 + Compare_LESS Compare_CompareResult = 2 + Compare_NOT_EQUAL Compare_CompareResult = 3 +) + +var Compare_CompareResult_name = map[int32]string{ + 0: "EQUAL", + 1: "GREATER", + 2: "LESS", + 3: "NOT_EQUAL", +} +var Compare_CompareResult_value = map[string]int32{ + "EQUAL": 0, + "GREATER": 1, + "LESS": 2, + "NOT_EQUAL": 3, +} + +func (x Compare_CompareResult) String() string { + return proto.EnumName(Compare_CompareResult_name, int32(x)) +} +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } + +type Compare_CompareTarget int32 + +const ( + Compare_VERSION Compare_CompareTarget = 0 + Compare_CREATE Compare_CompareTarget = 1 + Compare_MOD Compare_CompareTarget = 2 + Compare_VALUE Compare_CompareTarget = 3 + Compare_LEASE Compare_CompareTarget = 4 +) + +var Compare_CompareTarget_name = map[int32]string{ + 0: "VERSION", + 1: "CREATE", + 2: "MOD", + 3: "VALUE", + 4: "LEASE", +} +var Compare_CompareTarget_value = map[string]int32{ + "VERSION": 0, + "CREATE": 1, + "MOD": 2, + "VALUE": 3, + "LEASE": 4, +} + +func (x Compare_CompareTarget) String() string { + return proto.EnumName(Compare_CompareTarget_name, int32(x)) +} +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } + +type WatchCreateRequest_FilterType int32 + +const ( + // filter out put event. 
+ WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 + // filter out delete event. + WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 +) + +var WatchCreateRequest_FilterType_name = map[int32]string{ + 0: "NOPUT", + 1: "NODELETE", +} +var WatchCreateRequest_FilterType_value = map[string]int32{ + "NOPUT": 0, + "NODELETE": 1, +} + +func (x WatchCreateRequest_FilterType) String() string { + return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) +} +func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{21, 0} +} + +type AlarmRequest_AlarmAction int32 + +const ( + AlarmRequest_GET AlarmRequest_AlarmAction = 0 + AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 + AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 +) + +var AlarmRequest_AlarmAction_name = map[int32]string{ + 0: "GET", + 1: "ACTIVATE", + 2: "DEACTIVATE", +} +var AlarmRequest_AlarmAction_value = map[string]int32{ + "GET": 0, + "ACTIVATE": 1, + "DEACTIVATE": 2, +} + +func (x AlarmRequest_AlarmAction) String() string { + return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) +} +func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{54, 0} +} + +type ResponseHeader struct { + // cluster_id is the ID of the cluster which sent the response. + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // member_id is the ID of the member which sent the response. + MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` + // revision is the key-value store revision when the request was applied. + // For watch progress responses, the header.revision indicates progress. All future events + // recieved in this stream are guaranteed to have a higher revision number than the + // header.revision number. + Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` + // raft_term is the raft term when the request was applied. + RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +type RangeRequest struct { + // key is the first key for the range. If range_end is not given, the request only looks up key. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. 
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` + // sort_order is the order for returned sorted results. + SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` + // sort_target is the key-value field to use for sorting. + SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` + // serializable sets the range request to use serializable member-local reads. + // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` + // keys_only when set returns only the keys and not the values. + KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` + // count_only when set returns only the count of the keys in the range. + CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. 
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` +} + +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } + +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + +type RangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` + // more indicates if there are more keys to return in the requested range. + More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` + // count is set to the number of keys within the range when requested. + Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } + +func (m *RangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { + if m != nil { + return m.Kvs + } + return nil +} + +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +type PutRequest struct { + // key is the key, in bytes, to put into the key-value store. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // value is the value, in bytes, to associate with the key in the key-value store. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } + +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + +type PutResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // if prev_kv is set in the request, the previous key-value pair will be returned. + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } + +func (m *PutResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { + if m != nil { + return m.PrevKv + } + return nil +} + +type DeleteRangeRequest struct { + // key is the first key to delete in the range. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delete response. 
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` +} + +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } + +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +type DeleteRangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // deleted is the number of keys deleted by the delete range request. + Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` + // if prev_kv is set in the request, the previous key-value pairs will be returned. + PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` +} + +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } + +func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + +func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { + if m != nil { + return m.PrevKvs + } + return nil +} + +type RequestOp struct { + // request is a union of request types accepted by a transaction. 
+ // + // Types that are valid to be assigned to Request: + // *RequestOp_RequestRange + // *RequestOp_RequestPut + // *RequestOp_RequestDeleteRange + // *RequestOp_RequestTxn + Request isRequestOp_Request `protobuf_oneof:"request"` +} + +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } + +type isRequestOp_Request interface { + isRequestOp_Request() + MarshalTo([]byte) (int, error) + Size() int +} + +type RequestOp_RequestRange struct { + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` +} +type RequestOp_RequestPut struct { + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` +} +type RequestOp_RequestDeleteRange struct { + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` +} +type RequestOp_RequestTxn struct { + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` +} + +func (*RequestOp_RequestRange) isRequestOp_Request() {} +func (*RequestOp_RequestPut) isRequestOp_Request() {} +func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} +func (*RequestOp_RequestTxn) isRequestOp_Request() {} + +func (m *RequestOp) GetRequest() isRequestOp_Request { + if m != nil { + return m.Request + } + return nil +} + +func (m *RequestOp) GetRequestRange() *RangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { + return x.RequestRange + } + return nil +} + +func (m *RequestOp) GetRequestPut() *PutRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { + return x.RequestPut + } + return nil +} + +func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { + return x.RequestDeleteRange + } + return nil +} + +func (m *RequestOp) GetRequestTxn() *TxnRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { + return x.RequestTxn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ + (*RequestOp_RequestRange)(nil), + (*RequestOp_RequestPut)(nil), + (*RequestOp_RequestDeleteRange)(nil), + (*RequestOp_RequestTxn)(nil), + } +} + +func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestRange); err != nil { + return err + } + case *RequestOp_RequestPut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestPut); err != nil { + return err + } + case *RequestOp_RequestDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { + return err + } + case *RequestOp_RequestTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestTxn); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RequestOp.Request has unexpected type %T", x) + } + return nil +} + +func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RequestOp) + switch tag { + case 1: // request.request_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestRange{msg} + return true, err + case 2: // request.request_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestPut{msg} + return true, err + case 3: // request.request_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestDeleteRange{msg} + return true, err + case 4: // request.request_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestTxn{msg} + return true, err + default: + return false, nil + } +} + +func _RequestOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + s := proto.Size(x.RequestRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestPut: + s := proto.Size(x.RequestPut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestDeleteRange: + s := proto.Size(x.RequestDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestTxn: + s := proto.Size(x.RequestTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ResponseOp struct { + // response is a union of response types returned by a transaction. 
+ // + // Types that are valid to be assigned to Response: + // *ResponseOp_ResponseRange + // *ResponseOp_ResponsePut + // *ResponseOp_ResponseDeleteRange + // *ResponseOp_ResponseTxn + Response isResponseOp_Response `protobuf_oneof:"response"` +} + +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } + +type isResponseOp_Response interface { + isResponseOp_Response() + MarshalTo([]byte) (int, error) + Size() int +} + +type ResponseOp_ResponseRange struct { + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` +} +type ResponseOp_ResponsePut struct { + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` +} +type ResponseOp_ResponseDeleteRange struct { + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` +} +type ResponseOp_ResponseTxn struct { + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` +} + +func (*ResponseOp_ResponseRange) isResponseOp_Response() {} +func (*ResponseOp_ResponsePut) isResponseOp_Response() {} +func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} +func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} + +func (m *ResponseOp) GetResponse() isResponseOp_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *ResponseOp) GetResponseRange() *RangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { + return x.ResponseRange + } + return nil +} + +func (m *ResponseOp) GetResponsePut() *PutResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { + return x.ResponsePut + } + return nil +} + +func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { + return x.ResponseDeleteRange + } + return nil +} + +func (m *ResponseOp) GetResponseTxn() *TxnResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { + return x.ResponseTxn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ + (*ResponseOp_ResponseRange)(nil), + (*ResponseOp_ResponsePut)(nil), + (*ResponseOp_ResponseDeleteRange)(nil), + (*ResponseOp_ResponseTxn)(nil), + } +} + +func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseRange); err != nil { + return err + } + case *ResponseOp_ResponsePut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponsePut); err != nil { + return err + } + case *ResponseOp_ResponseDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { + return err + } + case *ResponseOp_ResponseTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseTxn); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) + } + return nil +} + +func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseOp) + switch tag { + case 1: // response.response_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseRange{msg} + return true, err + case 2: // response.response_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponsePut{msg} + return true, err + case 3: // response.response_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseDeleteRange{msg} + return true, err + case 4: // response.response_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseTxn{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + s := proto.Size(x.ResponseRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponsePut: + s := proto.Size(x.ResponsePut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponseDeleteRange: + s := proto.Size(x.ResponseDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponseTxn: + s := proto.Size(x.ResponseTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Compare struct { + // result is logical comparison operation for this comparison. 
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` + // target is the key-value field to inspect for the comparison. + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` + // key is the subject key for the comparison operation. + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Types that are valid to be assigned to TargetUnion: + // *Compare_Version + // *Compare_CreateRevision + // *Compare_ModRevision + // *Compare_Value + // *Compare_Lease + TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } + +type isCompare_TargetUnion interface { + isCompare_TargetUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type Compare_Version struct { + Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` +} +type Compare_CreateRevision struct { + CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"` +} +type Compare_ModRevision struct { + ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"` +} +type Compare_Value struct { + Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` +} +type Compare_Lease struct { + Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"` +} + +func (*Compare_Version) isCompare_TargetUnion() {} +func (*Compare_CreateRevision) isCompare_TargetUnion() {} +func (*Compare_ModRevision) isCompare_TargetUnion() {} +func (*Compare_Value) isCompare_TargetUnion() {} +func (*Compare_Lease) isCompare_TargetUnion() {} + +func (m *Compare) GetTargetUnion() isCompare_TargetUnion { + if m != nil { + return m.TargetUnion + } + return nil +} + +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Compare) GetVersion() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Version); ok { + return x.Version + } + return 0 +} + +func (m *Compare) GetCreateRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { + return x.CreateRevision + } + return 0 +} + +func (m *Compare) GetModRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { + return x.ModRevision + } + return 0 +} + +func (m *Compare) GetValue() []byte { + if x, ok := m.GetTargetUnion().(*Compare_Value); ok { + return x.Value + } + return nil +} + +func (m *Compare) GetLease() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Lease); ok { + return x.Lease + } + return 0 +} + +func (m *Compare) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ + (*Compare_Version)(nil), + (*Compare_CreateRevision)(nil), + (*Compare_ModRevision)(nil), + (*Compare_Value)(nil), + (*Compare_Lease)(nil), + } +} + +func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + _ = b.EncodeVarint(5<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + _ = b.EncodeVarint(6<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.ModRevision)) + case *Compare_Value: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeRawBytes(x.Value) + case *Compare_Lease: + _ = b.EncodeVarint(8<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Lease)) + case nil: + default: + return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) + } + return nil +} + +func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Compare) + switch tag { + case 4: // target_union.version + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Version{int64(x)} + return true, err + case 5: // target_union.create_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_CreateRevision{int64(x)} + return true, err + case 6: // target_union.mod_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_ModRevision{int64(x)} + return true, err + case 7: // target_union.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.TargetUnion = &Compare_Value{x} + return true, err + case 8: // target_union.lease + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Lease{int64(x)} + return true, err + default: + return false, nil + } +} + +func _Compare_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.ModRevision)) + case *Compare_Value: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Value))) + n += len(x.Value) + case *Compare_Lease: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Lease)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. 
All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +type TxnRequest struct { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` + // success is a list of requests which will be applied when compare evaluates to true. + Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` + // failure is a list of requests which will be applied when compare evaluates to false. + Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` +} + +func (m *TxnRequest) Reset() { *m = TxnRequest{} } +func (m *TxnRequest) String() string { return proto.CompactTextString(m) } +func (*TxnRequest) ProtoMessage() {} +func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } + +func (m *TxnRequest) GetCompare() []*Compare { + if m != nil { + return m.Compare + } + return nil +} + +func (m *TxnRequest) GetSuccess() []*RequestOp { + if m != nil { + return m.Success + } + return nil +} + +func (m *TxnRequest) GetFailure() []*RequestOp { + if m != nil { + return m.Failure + } + return nil +} + +type TxnResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // succeeded is set to true if the compare evaluated to true or false otherwise. + Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. 
+ Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` +} + +func (m *TxnResponse) Reset() { *m = TxnResponse{} } +func (m *TxnResponse) String() string { return proto.CompactTextString(m) } +func (*TxnResponse) ProtoMessage() {} +func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } + +func (m *TxnResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + +func (m *TxnResponse) GetResponses() []*ResponseOp { + if m != nil { + return m.Responses + } + return nil +} + +// CompactionRequest compacts the key-value store up to a given revision. All superseded keys +// with a revision less than the compaction revision will be removed. +type CompactionRequest struct { + // revision is the key-value store revision for the compaction operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` +} + +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } + +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + +type CompactionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } + +func (m *CompactionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type HashRequest struct { +} + +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } + +type HashKVRequest struct { + // revision is the key-value store revision for the hash operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } +func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } +func (*HashKVRequest) ProtoMessage() {} +func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } + +func (m *HashKVRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +type HashKVResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. 
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` + // compact_revision is the compacted revision of key-value store when hash begins. + CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` +} + +func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } +func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } +func (*HashKVResponse) ProtoMessage() {} +func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } + +func (m *HashKVResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashKVResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +func (m *HashKVResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +type HashResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // hash is the hash value computed from the responding member's KV's backend. + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } + +func (m *HashResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +type SnapshotRequest struct { +} + +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } + +type SnapshotResponse struct { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // remaining_bytes is the number of blob bytes to be sent after this message + RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` + // blob contains the next chunk of the snapshot in the snapshot stream. + Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` +} + +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } + +func (m *SnapshotResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + +type WatchRequest struct { + // request_union is a request to either create a new watcher or cancel an existing watcher. 
+ // + // Types that are valid to be assigned to RequestUnion: + // *WatchRequest_CreateRequest + // *WatchRequest_CancelRequest + // *WatchRequest_ProgressRequest + RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } + +type isWatchRequest_RequestUnion interface { + isWatchRequest_RequestUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type WatchRequest_CreateRequest struct { + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` +} +type WatchRequest_CancelRequest struct { + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` +} +type WatchRequest_ProgressRequest struct { + ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,oneof"` +} + +func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {} + +func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { + if m != nil { + return m.RequestUnion + } + return nil +} + +func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { + return x.CreateRequest + } + return nil +} + +func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { + return x.CancelRequest + } + return nil +} + +func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok { + return x.ProgressRequest + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{ + (*WatchRequest_CreateRequest)(nil), + (*WatchRequest_CancelRequest)(nil), + (*WatchRequest_ProgressRequest)(nil), + } +} + +func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CreateRequest); err != nil { + return err + } + case *WatchRequest_CancelRequest: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CancelRequest); err != nil { + return err + } + case *WatchRequest_ProgressRequest: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProgressRequest); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x) + } + return nil +} + +func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*WatchRequest) + switch tag { + case 1: // request_union.create_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCreateRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CreateRequest{msg} + return true, err + case 2: // request_union.cancel_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCancelRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CancelRequest{msg} + return true, err + case 3: // request_union.progress_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchProgressRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_ProgressRequest{msg} + return true, err + default: + return false, nil + } +} + +func _WatchRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + s := proto.Size(x.CreateRequest) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *WatchRequest_CancelRequest: + s := proto.Size(x.CancelRequest) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *WatchRequest_ProgressRequest: + s := proto.Size(x.ProgressRequest) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type WatchCreateRequest struct { + // key is the key to register for watching. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. 
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. + ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` + // filters filter the events at server side before it sends back to the watcher. + Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If watch_id is provided and non-zero, it will be assigned to this watcher. + // Since creating a watcher in etcd is not a synchronous operation, + // this can be used ensure that ordering is correct when creating multiple + // watchers on the same stream. Creating a watcher with an ID already in + // use on the stream will cause an error to be returned. + WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + // fragment enables splitting large revisions into multiple watch responses. + Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` +} + +func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } +func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCreateRequest) ProtoMessage() {} +func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } + +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *WatchCreateRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchCreateRequest) GetFragment() bool { + if m != nil { + return m.Fragment + } + return false +} + +type WatchCancelRequest struct { + // watch_id is the watcher id to cancel so that no more events are transmitted. 
+ WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` +} + +func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } +func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCancelRequest) ProtoMessage() {} +func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } + +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +// Requests the a watch stream progress status be sent in the watch response stream as soon as +// possible. +type WatchProgressRequest struct { +} + +func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } +func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } +func (*WatchProgressRequest) ProtoMessage() {} +func (*WatchProgressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } + +type WatchResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // watch_id is the ID of the watcher that corresponds to the response. + WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + // created is set to true if the response is for a create watch request. + // The client should record the watch_id and expect to receive events for + // the created watcher from the same stream. + // All events sent to the created watcher will attach with the same watch_id. + Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` + // canceled is set to true if the response is for a cancel watch request. + // No further events will be sent to the canceled watcher. + Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` + // compact_revision is set to the minimum index if a watcher tries to watch + // at a compacted index. + // + // This happens when creating a watcher at a compacted revision or the watcher cannot + // catch up with the progress of the key-value store. + // + // The client should treat the watcher as canceled and should not try to create any + // watcher with the same start_revision again. + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + // framgment is true if large watch response was split over multiple responses. 
+ Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } + +func (m *WatchResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + +func (m *WatchResponse) GetFragment() bool { + if m != nil { + return m.Fragment + } + return false +} + +func (m *WatchResponse) GetEvents() []*mvccpb.Event { + if m != nil { + return m.Events + } + return nil +} + +type LeaseGrantRequest struct { + // TTL is the advisory time-to-live in seconds. Expired lease will return -1. + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } + +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseGrantResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID for the granted lease. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the server chosen lease time-to-live in seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } + +func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type LeaseRevokeRequest struct { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseCheckpoint struct { + // ID is the lease ID to checkpoint. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Remaining_TTL is the remaining time until expiry of the lease. + Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"` +} + +func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} } +func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpoint) ProtoMessage() {} +func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } + +func (m *LeaseCheckpoint) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseCheckpoint) GetRemaining_TTL() int64 { + if m != nil { + return m.Remaining_TTL + } + return 0 +} + +type LeaseCheckpointRequest struct { + Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` +} + +func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} } +func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointRequest) ProtoMessage() {} +func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } + +func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type LeaseCheckpointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} } +func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointResponse) ProtoMessage() {} +func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } + +func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseKeepAliveRequest struct { + // ID is the lease ID for the lease to keep alive. 
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } + +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseKeepAliveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the new time-to-live for the lease. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` +} + +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } + +func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +type LeaseTimeToLiveRequest struct { + // ID is the lease ID for the lease. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // keys is true to query all the keys attached to this lease. + Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } + +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + +type LeaseTimeToLiveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` + // Keys is the list of keys attached to this lease. 
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + +type LeaseLeasesRequest struct { +} + +func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } +func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesRequest) ProtoMessage() {} +func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } + +type LeaseStatus struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } +func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } +func (*LeaseStatus) ProtoMessage() {} +func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } + +func (m *LeaseStatus) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseLeasesResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"` +} + +func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } +func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesResponse) ProtoMessage() {} +func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } + +func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { + if m != nil { + return m.Leases + } + return nil +} + +type Member struct { + // ID is the member ID for this member. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // peerURLs is the list of URLs the member exposes to the cluster for communication. + PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. + ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` + // isLearner indicates if the member is raft learner. 
+ IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } + +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + +func (m *Member) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddRequest struct { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` + // isLearner indicates if the added member is raft learner. + IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } + +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *MemberAddRequest) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // member is the member information for the added member. + Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } + +func (m *MemberAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberAddResponse) GetMember() *Member { + if m != nil { + return m.Member + } + return nil +} + +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberRemoveRequest struct { + // ID is the member ID of the member to remove. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } + +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberRemoveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after removing the member. 
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } + +func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberUpdateRequest struct { + // ID is the member ID of the member to update. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` +} + +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } + +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberUpdateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after updating the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } + +func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberListRequest struct { +} + +func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } + +type MemberListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members associated with the cluster. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } + +func (m *MemberListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberListResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberPromoteRequest struct { + // ID is the member ID of the member to promote. 
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} } +func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteRequest) ProtoMessage() {} +func (*MemberPromoteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } + +func (m *MemberPromoteRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberPromoteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after promoting the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} } +func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteResponse) ProtoMessage() {} +func (*MemberPromoteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } + +func (m *MemberPromoteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberPromoteResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type DefragmentRequest struct { +} + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } + +func (m *DefragmentResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type MoveLeaderRequest struct { + // targetID is the node ID for the new leader. + TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` +} + +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } + +func (m *MoveLeaderRequest) GetTargetID() uint64 { + if m != nil { + return m.TargetID + } + return 0 +} + +type MoveLeaderResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } + +func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AlarmRequest struct { + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. 
+ Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm to consider for this request. + Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } + +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmMember struct { + // memberID is the ID of the member associated with the raised alarm. + MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm which has been raised. + Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } + +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // alarms is a list of alarms associated with the alarm request. + Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` +} + +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } + +func (m *AlarmResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AlarmResponse) GetAlarms() []*AlarmMember { + if m != nil { + return m.Alarms + } + return nil +} + +type StatusRequest struct { +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } + +type StatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // version is the cluster protocol version used by the responding member. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. 
+ DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` + // leader is the member ID which the responding member believes is the current leader. + Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` + // raftIndex is the current raft committed index of the responding member. + RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` + // raftTerm is the current raft term of the responding member. + RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` + // raftAppliedIndex is the current raft applied index of the responding member. + RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` + // errors contains alarm/health information and status. + Errors []string `protobuf:"bytes,8,rep,name=errors" json:"errors,omitempty"` + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } + +func (m *StatusResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +func (m *StatusResponse) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex + } + return 0 +} + +func (m *StatusResponse) GetErrors() []string { + if m != nil { + return m.Errors + } + return nil +} + +func (m *StatusResponse) GetDbSizeInUse() int64 { + if m != nil { + return m.DbSizeInUse + } + return 0 +} + +func (m *StatusResponse) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type AuthEnableRequest struct { +} + +func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } +func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthEnableRequest) ProtoMessage() {} +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } + +type AuthDisableRequest struct { +} + +func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } +func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthDisableRequest) ProtoMessage() {} +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } + +type AuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthenticateRequest) Reset() { *m = 
AuthenticateRequest{} } +func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*AuthenticateRequest) ProtoMessage() {} +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } + +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserAddRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } +func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddRequest) ProtoMessage() {} +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } + +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions { + if m != nil { + return m.Options + } + return nil +} + +type AuthUserGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } +func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetRequest) ProtoMessage() {} +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } + +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserDeleteRequest struct { + // name is the name of the user to delete. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } +func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteRequest) ProtoMessage() {} +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } + +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserChangePasswordRequest struct { + // name is the name of the user whose password is being changed. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // password is the new password for the user. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } +func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordRequest) ProtoMessage() {} +func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{65} +} + +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserGrantRoleRequest struct { + // user is the name of the user which should be granted a given role. 
+ User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + // role is the name of the role to grant to the user. + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } +func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleRequest) ProtoMessage() {} +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } + +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserRevokeRoleRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } +func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleRequest) ProtoMessage() {} +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } + +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleAddRequest struct { + // name is the name of the role to add to the authentication system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } +func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddRequest) ProtoMessage() {} +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} } + +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthRoleGetRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } +func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetRequest) ProtoMessage() {} +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } + +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserListRequest struct { +} + +func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } +func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserListRequest) ProtoMessage() {} +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } + +type AuthRoleListRequest struct { +} + +func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } +func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListRequest) ProtoMessage() {} +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } + +type AuthRoleDeleteRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } +func (m *AuthRoleDeleteRequest) String() string { return 
proto.CompactTextString(m) } +func (*AuthRoleDeleteRequest) ProtoMessage() {} +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } + +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleGrantPermissionRequest struct { + // name is the name of the role which will be granted the permission. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // perm is the permission to grant to the role. + Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` +} + +func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } +func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} +func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{73} +} + +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleRevokePermissionRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } +func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} +func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{74} +} + +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +type AuthEnableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } +func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthEnableResponse) ProtoMessage() {} +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } + +func (m *AuthEnableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthDisableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } +func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthDisableResponse) ProtoMessage() {} +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } + +func (m *AuthDisableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthenticateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // token is an authorized token that can be used in succeeding RPCs + Token string 
`protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` +} + +func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } +func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } +func (*AuthenticateResponse) ProtoMessage() {} +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} } + +func (m *AuthenticateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +type AuthUserAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } +func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddResponse) ProtoMessage() {} +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} } + +func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` +} + +func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } +func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetResponse) ProtoMessage() {} +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} } + +func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } +func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteResponse) ProtoMessage() {} +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} } + +func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserChangePasswordResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } +func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordResponse) ProtoMessage() {} +func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{81} +} + +func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGrantRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } +func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleResponse) ProtoMessage() {} +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} } + +func (m 
*AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserRevokeRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } +func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleResponse) ProtoMessage() {} +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{83} } + +func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } +func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddResponse) ProtoMessage() {} +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{84} } + +func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` +} + +func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } +func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetResponse) ProtoMessage() {} +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{85} } + +func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` +} + +func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } +func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListResponse) ProtoMessage() {} +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{86} } + +func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` +} + +func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } +func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserListResponse) ProtoMessage() {} +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{87} } + +func (m *AuthUserListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + +type AuthRoleDeleteResponse struct { + Header *ResponseHeader 
`protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } +func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteResponse) ProtoMessage() {} +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{88} } + +func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGrantPermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } +func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} +func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{89} +} + +func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleRevokePermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } +func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} +func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{90} +} + +func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") + proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") + proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") + proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") + proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") + proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") + proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") + proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") + proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") + proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") + proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") + proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") + proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") + proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") + proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") + proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest") + proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse") + proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") + proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") + proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") + proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") + proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") + proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") + 
proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest") + proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") + proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") + proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") + proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") + proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") + proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint") + proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest") + proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse") + proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") + proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") + proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") + proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") + proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest") + proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus") + proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse") + proto.RegisterType((*Member)(nil), "etcdserverpb.Member") + proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") + proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") + proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") + proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") + proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") + proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") + proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") + proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") + proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest") + proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse") + proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") + proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") + proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") + proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") + proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") + proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") + proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") + proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") + proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") + proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") + proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") + proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") + proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") + proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") + proto.RegisterType((*AuthUserChangePasswordRequest)(nil), 
"etcdserverpb.AuthUserChangePasswordRequest") + proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") + proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") + proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") + proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") + proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") + proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") + proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") + proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") + proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") + proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") + proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") + proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") + proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") + proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") + proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") + proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") + proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") + proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse") + proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") + proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") + proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") + proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") + proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") + proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") + proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") + proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) + proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for KV service + +type KVClient interface { + // Range gets the keys in the range from the key-value store. + Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) +} + +type kVClient struct { + cc *grpc.ClientConn +} + +func NewKVClient(cc *grpc.ClientConn) KVClient { + return &kVClient{cc} +} + +func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { + out := new(RangeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { + out := new(PutResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { + out := new(DeleteRangeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { + out := new(TxnResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { + out := new(CompactionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for KV service + +type KVServer interface { + // Range gets the keys in the range from the key-value store. + Range(context.Context, *RangeRequest) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. 
+ Put(context.Context, *PutRequest) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(context.Context, *TxnRequest) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) +} + +func RegisterKVServer(s *grpc.Server, srv KVServer) { + s.RegisterService(&_KV_serviceDesc, srv) +} + +func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Range(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Range", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Range(ctx, req.(*RangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Put(ctx, req.(*PutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).DeleteRange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/DeleteRange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TxnRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Txn(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Txn", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Compact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Compact", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KV_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.KV", + HandlerType: (*KVServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Range", + Handler: _KV_Range_Handler, + }, + { + MethodName: "Put", + Handler: _KV_Put_Handler, + }, + { + MethodName: "DeleteRange", + Handler: _KV_DeleteRange_Handler, + }, + { + MethodName: "Txn", + Handler: _KV_Txn_Handler, + }, + { + MethodName: "Compact", + Handler: _KV_Compact_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// Client API for Watch service + +type WatchClient interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + return x, nil +} + +type Watch_WatchClient interface { + Send(*WatchRequest) error + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Send(m *WatchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *watchWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. 
+ Watch(Watch_WatchServer) error +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WatchServer).Watch(&watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchResponse) error + Recv() (*WatchRequest, error) + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *watchWatchServer) Recv() (*WatchRequest, error) { + m := new(WatchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Lease service + +type LeaseClient interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) + // LeaseLeases lists all existing leases. + LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) +} + +type leaseClient struct { + cc *grpc.ClientConn +} + +func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { + return &leaseClient{cc} +} + +func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { + out := new(LeaseGrantResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { + out := new(LeaseRevokeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) 
+ if err != nil { + return nil, err + } + x := &leaseLeaseKeepAliveClient{stream} + return x, nil +} + +type Lease_LeaseKeepAliveClient interface { + Send(*LeaseKeepAliveRequest) error + Recv() (*LeaseKeepAliveResponse, error) + grpc.ClientStream +} + +type leaseLeaseKeepAliveClient struct { + grpc.ClientStream +} + +func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { + m := new(LeaseKeepAliveResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { + out := new(LeaseTimeToLiveResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { + out := new(LeaseLeasesResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Lease service + +type LeaseServer interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + LeaseKeepAlive(Lease_LeaseKeepAliveServer) error + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) + // LeaseLeases lists all existing leases. 
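// Editorial sketch (not part of the vendored file or of this patch): granting
// a lease and refreshing it over the generated LeaseKeepAlive stream defined
// above. Written as if in this package; the LeaseGrantRequest and
// LeaseKeepAliveRequest fields (TTL, lease ID) are defined elsewhere in this
// file, so they are only referenced in comments rather than set here.
func exampleLease(ctx context.Context, cc *grpc.ClientConn) error {
	lc := NewLeaseClient(cc)
	grant, err := lc.LeaseGrant(ctx, &LeaseGrantRequest{}) // TTL would be set in real use
	if err != nil {
		return err
	}
	_ = grant // the response reports the granted lease and its TTL
	ka, err := lc.LeaseKeepAlive(ctx)
	if err != nil {
		return err
	}
	// Each Send refreshes the lease; each Recv reports the remaining TTL.
	if err := ka.Send(&LeaseKeepAliveRequest{}); err != nil { // lease ID would be set in real use
		return err
	}
	_, err = ka.Recv()
	return err
}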
+ LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) +} + +func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { + s.RegisterService(&_Lease_serviceDesc, srv) +} + +func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseGrantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseGrant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseGrant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseRevokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseRevoke(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseRevoke", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) +} + +type Lease_LeaseKeepAliveServer interface { + Send(*LeaseKeepAliveResponse) error + Recv() (*LeaseKeepAliveRequest, error) + grpc.ServerStream +} + +type leaseLeaseKeepAliveServer struct { + grpc.ServerStream +} + +func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { + m := new(LeaseKeepAliveRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseTimeToLiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseTimeToLive(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseLeases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseLeases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lease_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Lease", + 
HandlerType: (*LeaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LeaseGrant", + Handler: _Lease_LeaseGrant_Handler, + }, + { + MethodName: "LeaseRevoke", + Handler: _Lease_LeaseRevoke_Handler, + }, + { + MethodName: "LeaseTimeToLive", + Handler: _Lease_LeaseTimeToLive_Handler, + }, + { + MethodName: "LeaseLeases", + Handler: _Lease_LeaseLeases_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "LeaseKeepAlive", + Handler: _Lease_LeaseKeepAlive_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Cluster service + +type ClusterClient interface { + // MemberAdd adds a member into the cluster. + MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. + MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) +} + +type clusterClient struct { + cc *grpc.ClientConn +} + +func NewClusterClient(cc *grpc.ClientConn) ClusterClient { + return &clusterClient{cc} +} + +func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { + out := new(MemberAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { + out := new(MemberRemoveResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { + out := new(MemberUpdateResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { + out := new(MemberListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) { + out := new(MemberPromoteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Cluster service + +type ClusterServer interface { + // MemberAdd adds a member into the cluster. + MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. 
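// Editorial sketch (not part of the vendored file or of this patch): listing
// cluster members through the generated ClusterClient defined above. A plain
// listing needs no request fields; written as if in this package.
func exampleMemberList(ctx context.Context, cc *grpc.ClientConn) error {
	cl := NewClusterClient(cc)
	resp, err := cl.MemberList(ctx, &MemberListRequest{})
	if err != nil {
		return err
	}
	_ = resp // the response carries one entry per cluster member
	return nil
}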
+ MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. + MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error) +} + +func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { + s.RegisterService(&_Cluster_serviceDesc, srv) +} + +func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberRemoveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberRemove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberRemove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberPromoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberPromote(ctx, 
in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberPromote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Cluster_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Cluster", + HandlerType: (*ClusterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MemberAdd", + Handler: _Cluster_MemberAdd_Handler, + }, + { + MethodName: "MemberRemove", + Handler: _Cluster_MemberRemove_Handler, + }, + { + MethodName: "MemberUpdate", + Handler: _Cluster_MemberUpdate_Handler, + }, + { + MethodName: "MemberList", + Handler: _Cluster_MemberList_Handler, + }, + { + MethodName: "MemberPromote", + Handler: _Cluster_MemberPromote_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// Client API for Maintenance service + +type MaintenanceClient interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) + // Status gets the status of the member. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. + Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) + // MoveLeader requests current leader node to transfer its leadership to transferee. + MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) +} + +type maintenanceClient struct { + cc *grpc.ClientConn +} + +func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { + return &maintenanceClient{cc} +} + +func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { + out := new(AlarmResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { + out := new(DefragmentResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { + out := new(HashResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { + out := new(HashKVResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) + if err != nil { + return nil, err + } + x := &maintenanceSnapshotClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Maintenance_SnapshotClient interface { + Recv() (*SnapshotResponse, error) + grpc.ClientStream +} + +type maintenanceSnapshotClient struct { + grpc.ClientStream +} + +func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { + m := new(SnapshotResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { + out := new(MoveLeaderResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Maintenance service + +type MaintenanceServer interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) + // Status gets the status of the member. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. + Hash(context.Context, *HashRequest) (*HashResponse, error) + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error + // MoveLeader requests current leader node to transfer its leadership to transferee. 
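// Editorial sketch (not part of the vendored file or of this patch): draining
// the server-streaming Snapshot RPC defined above. The client sends a single
// SnapshotRequest and then reads SnapshotResponse chunks until io.EOF, the
// usual gRPC end-of-stream marker. Written as if in this package; the io
// import is assumed.
func exampleSnapshot(ctx context.Context, cc *grpc.ClientConn) error {
	mc := NewMaintenanceClient(cc)
	stream, err := mc.Snapshot(ctx, &SnapshotRequest{})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // snapshot fully received
		}
		if err != nil {
			return err
		}
		_ = resp // each response carries one chunk of the backend snapshot
	}
}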
+ MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) +} + +func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { + s.RegisterService(&_Maintenance_serviceDesc, srv) +} + +func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AlarmRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Alarm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Alarm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DefragmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Defragment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Defragment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Hash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Hash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashKVRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).HashKV(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/HashKV", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SnapshotRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return 
srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) +} + +type Maintenance_SnapshotServer interface { + Send(*SnapshotResponse) error + grpc.ServerStream +} + +type maintenanceSnapshotServer struct { + grpc.ServerStream +} + +func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveLeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).MoveLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/MoveLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Maintenance_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Maintenance", + HandlerType: (*MaintenanceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Alarm", + Handler: _Maintenance_Alarm_Handler, + }, + { + MethodName: "Status", + Handler: _Maintenance_Status_Handler, + }, + { + MethodName: "Defragment", + Handler: _Maintenance_Defragment_Handler, + }, + { + MethodName: "Hash", + Handler: _Maintenance_Hash_Handler, + }, + { + MethodName: "HashKV", + Handler: _Maintenance_HashKV_Handler, + }, + { + MethodName: "MoveLeader", + Handler: _Maintenance_MoveLeader_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Snapshot", + Handler: _Maintenance_Snapshot_Handler, + ServerStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Auth service + +type AuthClient interface { + // AuthEnable enables authentication. + AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. + Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) + // UserAdd adds a new user. User name cannot be empty. + UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) + // UserGrant grants a role to a specified user. + UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of specified user. 
+ UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. Role name cannot be empty. + RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) + // RoleList gets lists of all roles. + RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. + RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { + out := new(AuthEnableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { + out := new(AuthDisableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { + out := new(AuthenticateResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { + out := new(AuthUserAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { + out := new(AuthUserGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { + out := new(AuthUserListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { + out := new(AuthUserDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { + out := new(AuthUserChangePasswordResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { + out := new(AuthUserGrantRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { + out := new(AuthUserRevokeRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { + out := new(AuthRoleAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { + out := new(AuthRoleGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { + out := new(AuthRoleListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { + out := new(AuthRoleDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { + out := new(AuthRoleGrantPermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { + out := new(AuthRoleRevokePermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Auth service + +type AuthServer interface { + // AuthEnable enables authentication. + AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. 
+ Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) + // UserAdd adds a new user. User name cannot be empty. + UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) + // UserGrant grants a role to a specified user. + UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of specified user. + UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. Role name cannot be empty. + RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) + // RoleList gets lists of all roles. + RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. 
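// Editorial sketch (not part of the vendored file or of this patch): the
// generated unary handlers in this file (for example _Auth_AuthEnable_Handler
// below) call any configured grpc.UnaryServerInterceptor with a
// grpc.UnaryServerInfo whose FullMethod is the "/etcdserverpb.Service/Method"
// string. A minimal logging interceptor relying only on that contract, with
// the log import assumed:
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("rpc start: %s", info.FullMethod)
	resp, err := handler(ctx, req) // falls through to the generated handler closure
	log.Printf("rpc done: %s err=%v", info.FullMethod, err)
	return resp, err
}

// Such an interceptor would typically be installed when the server is built,
// e.g. grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor)), before
// RegisterAuthServer (or any other Register*Server above) is called.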
+ RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthEnableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthEnable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthEnable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthDisableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthDisable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthDisable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthenticateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Authenticate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/Authenticate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserListRequest) + if 
err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserChangePasswordRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserChangePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserChangePassword", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGrantRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGrantRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGrantRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserRevokeRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserRevokeRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserRevokeRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGrantPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleRevokePermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleRevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AuthEnable", + Handler: 
_Auth_AuthEnable_Handler, + }, + { + MethodName: "AuthDisable", + Handler: _Auth_AuthDisable_Handler, + }, + { + MethodName: "Authenticate", + Handler: _Auth_Authenticate_Handler, + }, + { + MethodName: "UserAdd", + Handler: _Auth_UserAdd_Handler, + }, + { + MethodName: "UserGet", + Handler: _Auth_UserGet_Handler, + }, + { + MethodName: "UserList", + Handler: _Auth_UserList_Handler, + }, + { + MethodName: "UserDelete", + Handler: _Auth_UserDelete_Handler, + }, + { + MethodName: "UserChangePassword", + Handler: _Auth_UserChangePassword_Handler, + }, + { + MethodName: "UserGrantRole", + Handler: _Auth_UserGrantRole_Handler, + }, + { + MethodName: "UserRevokeRole", + Handler: _Auth_UserRevokeRole_Handler, + }, + { + MethodName: "RoleAdd", + Handler: _Auth_RoleAdd_Handler, + }, + { + MethodName: "RoleGet", + Handler: _Auth_RoleGet_Handler, + }, + { + MethodName: "RoleList", + Handler: _Auth_RoleList_Handler, + }, + { + MethodName: "RoleDelete", + Handler: _Auth_RoleDelete_Handler, + }, + { + MethodName: "RoleGrantPermission", + Handler: _Auth_RoleGrantPermission_Handler, + }, + { + MethodName: "RoleRevokePermission", + Handler: _Auth_RoleRevokePermission_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) + } + if m.MemberId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) + } + if m.Revision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + return i, nil +} + +func (m *RangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + } + if m.Revision != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.SortOrder != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) + } + if m.Serializable { + dAtA[i] = 0x38 + i++ + if m.Serializable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.KeysOnly { + dAtA[i] = 0x40 + i++ + if m.KeysOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CountOnly { + dAtA[i] = 0x48 + i++ + if m.CountOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MinModRevision != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) + } + if 
m.MinCreateRevision != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) + } + return i, nil +} + +func (m *RangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Kvs) > 0 { + for _, msg := range m.Kvs { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.More { + dAtA[i] = 0x18 + i++ + if m.More { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Count != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Count)) + } + return i, nil +} + +func (m *PutRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + } + if m.PrevKv { + dAtA[i] = 0x20 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreValue { + dAtA[i] = 0x28 + i++ + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PrevKv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) + n3, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.PrevKv { + 
dAtA[i] = 0x18 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n4, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Deleted != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, msg := range m.PrevKvs { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RequestOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + nn5, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) + n6, err := m.RequestRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestPut != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) + n7, err := m.RequestPut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) + n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) + n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *ResponseOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Response != nil { + nn10, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn10 + } + return i, nil +} + +func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) + n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponsePut != 
nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) + n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) + n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) + n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func (m *Compare) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Compare) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Result != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Result)) + } + if m.Target != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Target)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.TargetUnion != nil { + nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn15 + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Version)) + return i, nil +} +func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) + return i, nil +} +func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) + return i, nil +} +func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Value != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} +func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x40 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + return i, nil +} +func (m *TxnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Compare) > 0 { + for _, msg := range m.Compare { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Success) > 0 { + for _, msg := range m.Success { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Failure) > 0 { + for _, msg := range m.Failure { + dAtA[i] = 0x1a + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TxnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n16, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.Succeeded { + dAtA[i] = 0x10 + i++ + if m.Succeeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Responses) > 0 { + for _, msg := range m.Responses { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.Physical { + dAtA[i] = 0x10 + i++ + if m.Physical { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n17, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *HashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + return i, nil +} + +func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n18, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.Hash != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + } + if m.CompactRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, 
i, uint64(m.CompactRevision)) + } + return i, nil +} + +func (m *HashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n19, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.Hash != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + } + return i, nil +} + +func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n20, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.RemainingBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) + } + if len(m.Blob) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) + i += copy(dAtA[i:], m.Blob) + } + return i, nil +} + +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequestUnion != nil { + nn21, err := m.RequestUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn21 + } + return i, nil +} + +func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CreateRequest != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) + n22, err := m.CreateRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} +func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CancelRequest != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) + n23, err := m.CancelRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} +func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ProgressRequest != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ProgressRequest.Size())) + n24, err := m.ProgressRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} +func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.StartRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) + } + if m.ProgressNotify { + dAtA[i] = 0x20 + i++ + if m.ProgressNotify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Filters) > 0 { + dAtA26 := make([]byte, len(m.Filters)*10) + var j25 int + for _, num := range m.Filters { + for num >= 1<<7 { + dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j25++ + } + dAtA26[j25] = uint8(num) + j25++ + } + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(j25)) + i += copy(dAtA[i:], dAtA26[:j25]) + } + if m.PrevKv { + dAtA[i] = 0x30 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.WatchId != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Fragment { + dAtA[i] = 0x40 + i++ + if m.Fragment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WatchId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + return i, nil +} + +func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *WatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n27, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.WatchId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Created { + dAtA[i] = 0x18 + i++ + if m.Created { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Canceled { + dAtA[i] = 0x20 + i++ + if m.Canceled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CompactRevision != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } + if m.Fragment { + dAtA[i] = 0x38 + i++ + if m.Fragment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x5a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TTL != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if len(m.Error) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n29, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Remaining_TTL != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL)) + } + return i, nil +} + +func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Checkpoints) > 0 { + for _, msg := range m.Checkpoints { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n30, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n31, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Keys { + dAtA[i] = 0x10 + i++ + if m.Keys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n32, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + return i, nil +} + +func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n33, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if len(m.Leases) > 0 { + for _, msg := range m.Leases { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.IsLearner { + dAtA[i] = 0x28 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.IsLearner { + dAtA[i] = 0x10 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n34, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.Member != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) + n35, err := m.Member.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if len(m.Members) > 0 { + for _, msg 
:= range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n36, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n37, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n38, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n39, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n40, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TargetID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) + } + return i, nil +} + +func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n41, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) + } + if m.MemberID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m *AlarmMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MemberID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n42, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if len(m.Alarms) > 0 { + for _, msg := range m.Alarms { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n43, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if m.DbSize != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) + } + if m.Leader != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) + } + if m.RaftIndex != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + if m.RaftAppliedIndex != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.DbSizeInUse != 0 { + dAtA[i] 
= 0x48 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse)) + } + if m.IsLearner { + dAtA[i] = 0x50 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Options.Size())) + n44, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + return i, nil +} + +func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Perm != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) + n45, err := m.Perm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n46, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + return i, nil +} + +func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n47, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + return i, nil +} + +func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n48, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if len(m.Token) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + return i, nil +} + +func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n49, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + return i, nil +} + +func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n50, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n51, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + } + return i, nil +} + +func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n52, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + } + return i, nil +} + +func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n53, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + return i, nil +} + +func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n54, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + return i, nil +} + +func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n55, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + } + return i, nil +} + +func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n56, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + } + if len(m.Perm) > 0 { + for _, msg := range m.Perm { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n57, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n58, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + if len(m.Users) > 0 { + for _, s := range m.Users { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n59, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + return i, nil +} + +func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + 
i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n60, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n61, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + } + return i, nil +} + +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ResponseHeader) Size() (n int) { + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovRpc(uint64(m.ClusterId)) + } + if m.MemberId != 0 { + n += 1 + sovRpc(uint64(m.MemberId)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + return n +} + +func (m *RangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.SortOrder != 0 { + n += 1 + sovRpc(uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + n += 1 + sovRpc(uint64(m.SortTarget)) + } + if m.Serializable { + n += 2 + } + if m.KeysOnly { + n += 2 + } + if m.CountOnly { + n += 2 + } + if m.MinModRevision != 0 { + n += 1 + sovRpc(uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxCreateRevision)) + } + return n +} + +func (m *RangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.More { + n += 2 + } + if m.Count != 0 { + n += 1 + sovRpc(uint64(m.Count)) + } + return n +} + +func (m *PutRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovRpc(uint64(m.Lease)) + } + if m.PrevKv { + n += 2 + } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } + return n +} + +func (m *PutResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *DeleteRangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv { + n += 2 + } + return n +} + +func (m *DeleteRangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Deleted != 0 { + 
n += 1 + sovRpc(uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, e := range m.PrevKvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *RequestOp) Size() (n int) { + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + return n +} + +func (m *RequestOp_RequestRange) Size() (n int) { + var l int + _ = l + if m.RequestRange != nil { + l = m.RequestRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestPut) Size() (n int) { + var l int + _ = l + if m.RequestPut != nil { + l = m.RequestPut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestDeleteRange) Size() (n int) { + var l int + _ = l + if m.RequestDeleteRange != nil { + l = m.RequestDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestTxn) Size() (n int) { + var l int + _ = l + if m.RequestTxn != nil { + l = m.RequestTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp) Size() (n int) { + var l int + _ = l + if m.Response != nil { + n += m.Response.Size() + } + return n +} + +func (m *ResponseOp_ResponseRange) Size() (n int) { + var l int + _ = l + if m.ResponseRange != nil { + l = m.ResponseRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponsePut) Size() (n int) { + var l int + _ = l + if m.ResponsePut != nil { + l = m.ResponsePut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { + var l int + _ = l + if m.ResponseDeleteRange != nil { + l = m.ResponseDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseTxn) Size() (n int) { + var l int + _ = l + if m.ResponseTxn != nil { + l = m.ResponseTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare) Size() (n int) { + var l int + _ = l + if m.Result != 0 { + n += 1 + sovRpc(uint64(m.Result)) + } + if m.Target != 0 { + n += 1 + sovRpc(uint64(m.Target)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.TargetUnion != nil { + n += m.TargetUnion.Size() + } + l = len(m.RangeEnd) + if l > 0 { + n += 2 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *Compare_Version) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Version)) + return n +} +func (m *Compare_CreateRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.CreateRevision)) + return n +} +func (m *Compare_ModRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.ModRevision)) + return n +} +func (m *Compare_Value) Size() (n int) { + var l int + _ = l + if m.Value != nil { + l = len(m.Value) + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare_Lease) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Lease)) + return n +} +func (m *TxnRequest) Size() (n int) { + var l int + _ = l + if len(m.Compare) > 0 { + for _, e := range m.Compare { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Success) > 0 { + for _, e := range m.Success { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Failure) > 0 { + for _, e := range m.Failure { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *TxnResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Succeeded { + n += 2 + } + if len(m.Responses) > 0 { + for _, e := range 
m.Responses { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *CompactionRequest) Size() (n int) { + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.Physical { + n += 2 + } + return n +} + +func (m *CompactionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *HashRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *HashKVRequest) Size() (n int) { + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + return n +} + +func (m *HashKVResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + return n +} + +func (m *HashResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + return n +} + +func (m *SnapshotRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SnapshotResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.RemainingBytes != 0 { + n += 1 + sovRpc(uint64(m.RemainingBytes)) + } + l = len(m.Blob) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *WatchRequest) Size() (n int) { + var l int + _ = l + if m.RequestUnion != nil { + n += m.RequestUnion.Size() + } + return n +} + +func (m *WatchRequest_CreateRequest) Size() (n int) { + var l int + _ = l + if m.CreateRequest != nil { + l = m.CreateRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_CancelRequest) Size() (n int) { + var l int + _ = l + if m.CancelRequest != nil { + l = m.CancelRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_ProgressRequest) Size() (n int) { + var l int + _ = l + if m.ProgressRequest != nil { + l = m.ProgressRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchCreateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.StartRevision != 0 { + n += 1 + sovRpc(uint64(m.StartRevision)) + } + if m.ProgressNotify { + n += 2 + } + if len(m.Filters) > 0 { + l = 0 + for _, e := range m.Filters { + l += sovRpc(uint64(e)) + } + n += 1 + sovRpc(uint64(l)) + l + } + if m.PrevKv { + n += 2 + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Fragment { + n += 2 + } + return n +} + +func (m *WatchCancelRequest) Size() (n int) { + var l int + _ = l + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + return n +} + +func (m *WatchProgressRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *WatchResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Created { + n += 2 + } + if m.Canceled { + n += 2 + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Fragment { + n += 2 + } + if len(m.Events) > 0 { + for 
_, e := range m.Events { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseGrantRequest) Size() (n int) { + var l int + _ = l + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseGrantResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseRevokeRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseRevokeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseCheckpoint) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Remaining_TTL != 0 { + n += 1 + sovRpc(uint64(m.Remaining_TTL)) + } + return n +} + +func (m *LeaseCheckpointRequest) Size() (n int) { + var l int + _ = l + if len(m.Checkpoints) > 0 { + for _, e := range m.Checkpoints { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseCheckpointResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseKeepAliveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseKeepAliveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + return n +} + +func (m *LeaseTimeToLiveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Keys { + n += 2 + } + return n +} + +func (m *LeaseTimeToLiveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + n += 1 + sovRpc(uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseLeasesRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *LeaseStatus) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseLeasesResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *Member) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.IsLearner { + n += 2 + } + return n +} + +func (m *MemberAddRequest) Size() (n int) { + var l 
int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.IsLearner { + n += 2 + } + return n +} + +func (m *MemberAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Member != nil { + l = m.Member.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberRemoveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *MemberRemoveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *MemberListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberPromoteRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *MemberPromoteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *DefragmentRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *DefragmentResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *MoveLeaderRequest) Size() (n int) { + var l int + _ = l + if m.TargetID != 0 { + n += 1 + sovRpc(uint64(m.TargetID)) + } + return n +} + +func (m *MoveLeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AlarmRequest) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRpc(uint64(m.Action)) + } + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmMember) Size() (n int) { + var l int + _ = l + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Alarms) > 0 { + for _, e := range m.Alarms { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n 
+} + +func (m *StatusRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StatusResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.DbSize != 0 { + n += 1 + sovRpc(uint64(m.DbSize)) + } + if m.Leader != 0 { + n += 1 + sovRpc(uint64(m.Leader)) + } + if m.RaftIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + if m.RaftAppliedIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.DbSizeInUse != 0 { + n += 1 + sovRpc(uint64(m.DbSizeInUse)) + } + if m.IsLearner { + n += 2 + } + return n +} + +func (m *AuthEnableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthDisableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserAddRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Perm != nil { + l = m.Perm.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + 
sovRpc(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthEnableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthDisableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthenticateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthUserDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Perm) > 0 { + for _, e := range m.Perm { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthUserListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func sovRpc(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 
1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResponseHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) + } + m.MemberId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) + } + m.SortOrder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) + } + m.SortTarget = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Serializable = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.KeysOnly = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + m.CountOnly = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) + } + m.MinModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) + } + m.MaxModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) + } + m.MinCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) + } + m.MaxCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.More = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &mvccpb.KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + m.Deleted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Deleted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) + if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOp) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestPut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l 
{ + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponsePut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Compare) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Compare: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= (Compare_CompareResult(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + m.Target = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Version{v} + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_CreateRevision{v} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_ModRevision{v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.TargetUnion = &Compare_Value{v} + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Lease{v} + case 64: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Compare = append(m.Compare, &Compare{}) + if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Success = append(m.Success, &RequestOp{}) + if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Failure = append(m.Failure, &RequestOp{}) + if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Succeeded = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, &ResponseOp{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Physical = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) + } + m.RemainingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemainingBytes |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Blob == nil { + m.Blob = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCreateRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CreateRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCancelRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CancelRequest{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchProgressRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_ProgressRequest{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) + } + m.StartRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressNotify = bool(v != 0) + case 5: + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Canceled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &mvccpb.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l 
{ + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType) + } + m.Remaining_TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Remaining_TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{}) + if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) + } + m.GrantedTTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GrantedTTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseStatus) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, &LeaseStatus{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Member == nil { + m.Member = &Member{} + } + if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveResponse: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) + } + m.TargetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alarms = append(m.Alarms, &AlarmMember{}) + if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) + } + m.DbSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSize |= 
(int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + m.Leader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Leader |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + } + m.RaftIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType) + } + m.RaftAppliedIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftAppliedIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType) + } + m.DbSizeInUse = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSizeInUse |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &authpb.UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListRequest) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Perm == nil { + m.Perm = &authpb.Permission{} + } + if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Perm = append(m.Perm, &authpb.Permission{}) + if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRpc + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRpc(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } + +var fileDescriptorRpc = []byte{ + // 3928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x23, 0xc9, + 0x75, 0x56, 0x93, 0xe2, 0xed, 0xf0, 0x22, 0xaa, 0x74, 0x19, 0x0e, 0x67, 0x46, 0xa3, 0xad, 0xd9, + 0xd9, 0xd5, 0xce, 0xec, 0x8a, 0x6b, 0xd9, 0x4e, 0x80, 0x49, 0xe2, 0x58, 0x23, 0x71, 0x67, 0xb4, + 0xd2, 0x88, 0xda, 0x16, 0x67, 0xf6, 0x02, 0x23, 0x42, 0x8b, 0x2c, 0x49, 0x1d, 0x91, 0xdd, 0x74, + 0x77, 0x93, 0x23, 0x6d, 0x2e, 0x0e, 0x0c, 0xc7, 0x40, 0xf2, 0x68, 0x03, 0x41, 0xf2, 0x90, 0xa7, + 0x20, 0x08, 0xfc, 0x90, 0xe7, 0x00, 0xf9, 0x05, 0x79, 0xca, 0x05, 0xf9, 0x03, 0xc1, 0xc6, 0x2f, + 0xc9, 0xaf, 0x30, 0xea, 0xd6, 0x5d, 0x7d, 0xa3, 0xc6, 0xa6, 0x77, 0x5f, 0xa4, 0xae, 0x53, 0xa7, + 0xce, 0x39, 0x75, 0xaa, 0xea, 0x9c, 0xd3, 0x5f, 0x17, 0xa1, 0xe4, 0x8c, 0x7a, 0x9b, 0x23, 0xc7, + 0xf6, 0x6c, 0x54, 0x21, 0x5e, 0xaf, 0xef, 0x12, 0x67, 0x42, 0x9c, 0xd1, 0x69, 0x73, 0xf9, 0xdc, + 0x3e, 0xb7, 0x59, 0x47, 0x8b, 0x3e, 0x71, 0x9e, 0xe6, 0x6d, 0xca, 0xd3, 0x1a, 0x4e, 0x7a, 0x3d, + 0xf6, 0x67, 0x74, 0xda, 0xba, 0x9c, 0x88, 0xae, 0x3b, 0xac, 0xcb, 0x18, 0x7b, 0x17, 0xec, 0xcf, + 0xe8, 0x94, 0xfd, 0x13, 0x9d, 0x77, 0xcf, 0x6d, 0xfb, 0x7c, 0x40, 0x5a, 0xc6, 0xc8, 0x6c, 0x19, + 0x96, 0x65, 0x7b, 0x86, 0x67, 0xda, 0x96, 0xcb, 0x7b, 0xf1, 0x5f, 0x6a, 0x50, 0xd3, 0x89, 0x3b, + 0xb2, 0x2d, 0x97, 0x3c, 0x27, 0x46, 0x9f, 0x38, 0xe8, 0x1e, 0x40, 0x6f, 0x30, 0x76, 0x3d, 0xe2, + 0x9c, 0x98, 0xfd, 0x86, 0xb6, 0xae, 0x6d, 0xcc, 0xeb, 0x25, 0x41, 0xd9, 0xeb, 0xa3, 0x3b, 0x50, + 0x1a, 0x92, 0xe1, 0x29, 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf5, 0x51, 0x13, 0x8a, 0x0e, + 0x99, 0x98, 0xae, 0x69, 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, + 0x67, 0xde, 0x89, 0x47, 0x9c, 0x61, 0x63, 0x9e, 0x0f, 0xa4, 0x84, 0x2e, 0x71, 0x86, 0xf8, 0x27, + 0x39, 0xa8, 0xe8, 0x86, 0x75, 0x4e, 0x74, 0xf2, 0xc3, 0x31, 0x71, 0x3d, 0x54, 0x87, 0xec, 0x25, + 0xb9, 0x66, 0xea, 0x2b, 0x3a, 0x7d, 0xe4, 0xe3, 0xad, 0x73, 0x72, 0x42, 0x2c, 0xae, 0xb8, 0x42, + 0xc7, 0x5b, 0xe7, 0xa4, 0x6d, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x84, 0x56, 0xde, + 0x08, 0x99, 0x33, 0x1f, 0x31, 0x67, 0x07, 0xc0, 0xb5, 0x1d, 0xef, 0xc4, 0x76, 0xfa, 0xc4, 0x69, + 0xe4, 0xd6, 0xb5, 0x8d, 0xda, 0xd6, 0xdb, 0x9b, 0xea, 0x42, 0x6c, 0xaa, 0x06, 0x6d, 0x1e, 0xdb, + 0x8e, 
0xd7, 0xa1, 0xbc, 0x7a, 0xc9, 0x95, 0x8f, 0xe8, 0x23, 0x28, 0x33, 0x21, 0x9e, 0xe1, 0x9c, + 0x13, 0xaf, 0x91, 0x67, 0x52, 0x1e, 0xde, 0x20, 0xa5, 0xcb, 0x98, 0x75, 0xa6, 0x9e, 0x3f, 0x23, + 0x0c, 0x15, 0x97, 0x38, 0xa6, 0x31, 0x30, 0xbf, 0x34, 0x4e, 0x07, 0xa4, 0x51, 0x58, 0xd7, 0x36, + 0x8a, 0x7a, 0x88, 0x46, 0xe7, 0x7f, 0x49, 0xae, 0xdd, 0x13, 0xdb, 0x1a, 0x5c, 0x37, 0x8a, 0x8c, + 0xa1, 0x48, 0x09, 0x1d, 0x6b, 0x70, 0xcd, 0x16, 0xcd, 0x1e, 0x5b, 0x1e, 0xef, 0x2d, 0xb1, 0xde, + 0x12, 0xa3, 0xb0, 0xee, 0x0d, 0xa8, 0x0f, 0x4d, 0xeb, 0x64, 0x68, 0xf7, 0x4f, 0x7c, 0x87, 0x00, + 0x73, 0x48, 0x6d, 0x68, 0x5a, 0x2f, 0xec, 0xbe, 0x2e, 0xdd, 0x42, 0x39, 0x8d, 0xab, 0x30, 0x67, + 0x59, 0x70, 0x1a, 0x57, 0x2a, 0xe7, 0x26, 0x2c, 0x51, 0x99, 0x3d, 0x87, 0x18, 0x1e, 0x09, 0x98, + 0x2b, 0x8c, 0x79, 0x71, 0x68, 0x5a, 0x3b, 0xac, 0x27, 0xc4, 0x6f, 0x5c, 0xc5, 0xf8, 0xab, 0x82, + 0xdf, 0xb8, 0x0a, 0xf3, 0xe3, 0x4d, 0x28, 0xf9, 0x3e, 0x47, 0x45, 0x98, 0x3f, 0xec, 0x1c, 0xb6, + 0xeb, 0x73, 0x08, 0x20, 0xbf, 0x7d, 0xbc, 0xd3, 0x3e, 0xdc, 0xad, 0x6b, 0xa8, 0x0c, 0x85, 0xdd, + 0x36, 0x6f, 0x64, 0xf0, 0x53, 0x80, 0xc0, 0xbb, 0xa8, 0x00, 0xd9, 0xfd, 0xf6, 0xe7, 0xf5, 0x39, + 0xca, 0xf3, 0xaa, 0xad, 0x1f, 0xef, 0x75, 0x0e, 0xeb, 0x1a, 0x1d, 0xbc, 0xa3, 0xb7, 0xb7, 0xbb, + 0xed, 0x7a, 0x86, 0x72, 0xbc, 0xe8, 0xec, 0xd6, 0xb3, 0xa8, 0x04, 0xb9, 0x57, 0xdb, 0x07, 0x2f, + 0xdb, 0xf5, 0x79, 0xfc, 0x73, 0x0d, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x81, 0xfc, 0x05, + 0x3b, 0x17, 0x6c, 0x2b, 0x96, 0xb7, 0xee, 0x46, 0x16, 0x37, 0x74, 0x76, 0x74, 0xc1, 0x8b, 0x30, + 0x64, 0x2f, 0x27, 0x6e, 0x23, 0xb3, 0x9e, 0xdd, 0x28, 0x6f, 0xd5, 0x37, 0xf9, 0x81, 0xdd, 0xdc, + 0x27, 0xd7, 0xaf, 0x8c, 0xc1, 0x98, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0x1f, 0xda, 0x0e, 0x61, 0x3b, + 0xb6, 0xa8, 0xb3, 0x67, 0xba, 0x8d, 0xd9, 0xa2, 0x89, 0xdd, 0xca, 0x1b, 0xf8, 0x17, 0x1a, 0xc0, + 0xd1, 0xd8, 0x4b, 0x3f, 0x1a, 0xcb, 0x90, 0x9b, 0x50, 0xc1, 0xe2, 0x58, 0xf0, 0x06, 0x3b, 0x13, + 0xc4, 0x70, 0x89, 0x7f, 0x26, 0x68, 0x03, 0xdd, 0x82, 0xc2, 0xc8, 0x21, 0x93, 0x93, 0xcb, 0x09, + 0x53, 0x52, 0xd4, 0xf3, 0xb4, 0xb9, 0x3f, 0x41, 0x6f, 0x41, 0xc5, 0x3c, 0xb7, 0x6c, 0x87, 0x9c, + 0x70, 0x59, 0x39, 0xd6, 0x5b, 0xe6, 0x34, 0x66, 0xb7, 0xc2, 0xc2, 0x05, 0xe7, 0x55, 0x96, 0x03, + 0x4a, 0xc2, 0x16, 0x94, 0x99, 0xa9, 0x33, 0xb9, 0xef, 0xbd, 0xc0, 0xc6, 0x0c, 0x1b, 0x16, 0x77, + 0xa1, 0xb0, 0x1a, 0xff, 0x00, 0xd0, 0x2e, 0x19, 0x10, 0x8f, 0xcc, 0x12, 0x3d, 0x14, 0x9f, 0x64, + 0x55, 0x9f, 0xe0, 0x9f, 0x69, 0xb0, 0x14, 0x12, 0x3f, 0xd3, 0xb4, 0x1a, 0x50, 0xe8, 0x33, 0x61, + 0xdc, 0x82, 0xac, 0x2e, 0x9b, 0xe8, 0x31, 0x14, 0x85, 0x01, 0x6e, 0x23, 0x9b, 0xb2, 0x69, 0x0a, + 0xdc, 0x26, 0x17, 0xff, 0x22, 0x03, 0x25, 0x31, 0xd1, 0xce, 0x08, 0x6d, 0x43, 0xd5, 0xe1, 0x8d, + 0x13, 0x36, 0x1f, 0x61, 0x51, 0x33, 0x3d, 0x08, 0x3d, 0x9f, 0xd3, 0x2b, 0x62, 0x08, 0x23, 0xa3, + 0xdf, 0x83, 0xb2, 0x14, 0x31, 0x1a, 0x7b, 0xc2, 0xe5, 0x8d, 0xb0, 0x80, 0x60, 0xff, 0x3d, 0x9f, + 0xd3, 0x41, 0xb0, 0x1f, 0x8d, 0x3d, 0xd4, 0x85, 0x65, 0x39, 0x98, 0xcf, 0x46, 0x98, 0x91, 0x65, + 0x52, 0xd6, 0xc3, 0x52, 0xe2, 0x4b, 0xf5, 0x7c, 0x4e, 0x47, 0x62, 0xbc, 0xd2, 0xa9, 0x9a, 0xe4, + 0x5d, 0xf1, 0xe0, 0x1d, 0x33, 0xa9, 0x7b, 0x65, 0xc5, 0x4d, 0xea, 0x5e, 0x59, 0x4f, 0x4b, 0x50, + 0x10, 0x2d, 0xfc, 0x2f, 0x19, 0x00, 0xb9, 0x1a, 0x9d, 0x11, 0xda, 0x85, 0x9a, 0x23, 0x5a, 0x21, + 0x6f, 0xdd, 0x49, 0xf4, 0x96, 0x58, 0xc4, 0x39, 0xbd, 0x2a, 0x07, 0x71, 0xe3, 0xbe, 0x07, 0x15, + 0x5f, 0x4a, 0xe0, 0xb0, 0xdb, 0x09, 0x0e, 0xf3, 0x25, 0x94, 0xe5, 0x00, 0xea, 0xb2, 0x4f, 0x61, + 0xc5, 0x1f, 0x9f, 0xe0, 0xb3, 
0xb7, 0xa6, 0xf8, 0xcc, 0x17, 0xb8, 0x24, 0x25, 0xa8, 0x5e, 0x53, + 0x0d, 0x0b, 0xdc, 0x76, 0x3b, 0xc1, 0x6d, 0x71, 0xc3, 0xa8, 0xe3, 0x80, 0xe6, 0x4b, 0xde, 0xc4, + 0xff, 0x97, 0x85, 0xc2, 0x8e, 0x3d, 0x1c, 0x19, 0x0e, 0x5d, 0x8d, 0xbc, 0x43, 0xdc, 0xf1, 0xc0, + 0x63, 0xee, 0xaa, 0x6d, 0x3d, 0x08, 0x4b, 0x14, 0x6c, 0xf2, 0xbf, 0xce, 0x58, 0x75, 0x31, 0x84, + 0x0e, 0x16, 0xe9, 0x31, 0xf3, 0x06, 0x83, 0x45, 0x72, 0x14, 0x43, 0xe4, 0x41, 0xce, 0x06, 0x07, + 0xb9, 0x09, 0x85, 0x09, 0x71, 0x82, 0x94, 0xfe, 0x7c, 0x4e, 0x97, 0x04, 0xf4, 0x1e, 0x2c, 0x44, + 0xd3, 0x4b, 0x4e, 0xf0, 0xd4, 0x7a, 0xe1, 0x6c, 0xf4, 0x00, 0x2a, 0xa1, 0x1c, 0x97, 0x17, 0x7c, + 0xe5, 0xa1, 0x92, 0xe2, 0x56, 0x65, 0x5c, 0xa5, 0xf9, 0xb8, 0xf2, 0x7c, 0x4e, 0x46, 0xd6, 0x55, + 0x19, 0x59, 0x8b, 0x62, 0x94, 0x88, 0xad, 0xa1, 0x20, 0xf3, 0xfd, 0x70, 0x90, 0xc1, 0xdf, 0x87, + 0x6a, 0xc8, 0x41, 0x34, 0xef, 0xb4, 0x3f, 0x79, 0xb9, 0x7d, 0xc0, 0x93, 0xd4, 0x33, 0x96, 0x97, + 0xf4, 0xba, 0x46, 0x73, 0xdd, 0x41, 0xfb, 0xf8, 0xb8, 0x9e, 0x41, 0x55, 0x28, 0x1d, 0x76, 0xba, + 0x27, 0x9c, 0x2b, 0x8b, 0x9f, 0xf9, 0x12, 0x44, 0x92, 0x53, 0x72, 0xdb, 0x9c, 0x92, 0xdb, 0x34, + 0x99, 0xdb, 0x32, 0x41, 0x6e, 0x63, 0x69, 0xee, 0xa0, 0xbd, 0x7d, 0xdc, 0xae, 0xcf, 0x3f, 0xad, + 0x41, 0x85, 0xfb, 0xf7, 0x64, 0x6c, 0xd1, 0x54, 0xfb, 0x0f, 0x1a, 0x40, 0x70, 0x9a, 0x50, 0x0b, + 0x0a, 0x3d, 0xae, 0xa7, 0xa1, 0xb1, 0x60, 0xb4, 0x92, 0xb8, 0x64, 0xba, 0xe4, 0x42, 0xdf, 0x82, + 0x82, 0x3b, 0xee, 0xf5, 0x88, 0x2b, 0x53, 0xde, 0xad, 0x68, 0x3c, 0x14, 0xd1, 0x4a, 0x97, 0x7c, + 0x74, 0xc8, 0x99, 0x61, 0x0e, 0xc6, 0x2c, 0x01, 0x4e, 0x1f, 0x22, 0xf8, 0xf0, 0xdf, 0x69, 0x50, + 0x56, 0x36, 0xef, 0x6f, 0x18, 0x84, 0xef, 0x42, 0x89, 0xd9, 0x40, 0xfa, 0x22, 0x0c, 0x17, 0xf5, + 0x80, 0x80, 0x7e, 0x07, 0x4a, 0xf2, 0x04, 0xc8, 0x48, 0xdc, 0x48, 0x16, 0xdb, 0x19, 0xe9, 0x01, + 0x2b, 0xde, 0x87, 0x45, 0xe6, 0x95, 0x1e, 0x2d, 0xae, 0xa5, 0x1f, 0xd5, 0xf2, 0x53, 0x8b, 0x94, + 0x9f, 0x4d, 0x28, 0x8e, 0x2e, 0xae, 0x5d, 0xb3, 0x67, 0x0c, 0x84, 0x15, 0x7e, 0x1b, 0x7f, 0x0c, + 0x48, 0x15, 0x36, 0xcb, 0x74, 0x71, 0x15, 0xca, 0xcf, 0x0d, 0xf7, 0x42, 0x98, 0x84, 0x1f, 0x43, + 0x95, 0x36, 0xf7, 0x5f, 0xbd, 0x81, 0x8d, 0xec, 0xe5, 0x40, 0x72, 0xcf, 0xe4, 0x73, 0x04, 0xf3, + 0x17, 0x86, 0x7b, 0xc1, 0x26, 0x5a, 0xd5, 0xd9, 0x33, 0x7a, 0x0f, 0xea, 0x3d, 0x3e, 0xc9, 0x93, + 0xc8, 0x2b, 0xc3, 0x82, 0xa0, 0xfb, 0x95, 0xe0, 0x67, 0x50, 0xe1, 0x73, 0xf8, 0x6d, 0x1b, 0x81, + 0x17, 0x61, 0xe1, 0xd8, 0x32, 0x46, 0xee, 0x85, 0x2d, 0xb3, 0x1b, 0x9d, 0x74, 0x3d, 0xa0, 0xcd, + 0xa4, 0xf1, 0x5d, 0x58, 0x70, 0xc8, 0xd0, 0x30, 0x2d, 0xd3, 0x3a, 0x3f, 0x39, 0xbd, 0xf6, 0x88, + 0x2b, 0x5e, 0x98, 0x6a, 0x3e, 0xf9, 0x29, 0xa5, 0x52, 0xd3, 0x4e, 0x07, 0xf6, 0xa9, 0x08, 0x73, + 0xec, 0x19, 0xff, 0x34, 0x03, 0x95, 0x4f, 0x0d, 0xaf, 0x27, 0x97, 0x0e, 0xed, 0x41, 0xcd, 0x0f, + 0x6e, 0x8c, 0x22, 0x6c, 0x89, 0xa4, 0x58, 0x36, 0x46, 0x96, 0xd2, 0x32, 0x3b, 0x56, 0x7b, 0x2a, + 0x81, 0x89, 0x32, 0xac, 0x1e, 0x19, 0xf8, 0xa2, 0x32, 0xe9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, + 0xa0, 0x0e, 0xd4, 0x47, 0x8e, 0x7d, 0xee, 0x10, 0xd7, 0xf5, 0x85, 0xf1, 0x34, 0x86, 0x13, 0x84, + 0x1d, 0x09, 0xd6, 0x40, 0xdc, 0xc2, 0x28, 0x4c, 0x7a, 0xba, 0x10, 0xd4, 0x33, 0x3c, 0x38, 0xfd, + 0x57, 0x06, 0x50, 0x7c, 0x52, 0xbf, 0x6e, 0x89, 0xf7, 0x10, 0x6a, 0xae, 0x67, 0x38, 0xb1, 0xcd, + 0x56, 0x65, 0x54, 0x3f, 0xe2, 0xbf, 0x0b, 0xbe, 0x41, 0x27, 0x96, 0xed, 0x99, 0x67, 0xd7, 0xa2, + 0x4a, 0xae, 0x49, 0xf2, 0x21, 0xa3, 0xa2, 0x36, 0x14, 0xce, 0xcc, 0x81, 0x47, 0x1c, 0xb7, 0x91, + 0x5b, 0xcf, 0x6e, 0xd4, 0xb6, 0x1e, 0xdf, 0xb4, 0x0c, 
0x9b, 0x1f, 0x31, 0xfe, 0xee, 0xf5, 0x88, + 0xe8, 0x72, 0xac, 0x5a, 0x79, 0xe6, 0x43, 0xd5, 0xf8, 0x6d, 0x28, 0xbe, 0xa6, 0x22, 0xe8, 0x5b, + 0x76, 0x81, 0x17, 0x8b, 0xac, 0xcd, 0x5f, 0xb2, 0xcf, 0x1c, 0xe3, 0x7c, 0x48, 0x2c, 0x4f, 0xbe, + 0x07, 0xca, 0x36, 0x7e, 0x08, 0x10, 0xa8, 0xa1, 0x21, 0xff, 0xb0, 0x73, 0xf4, 0xb2, 0x5b, 0x9f, + 0x43, 0x15, 0x28, 0x1e, 0x76, 0x76, 0xdb, 0x07, 0x6d, 0x9a, 0x1f, 0x70, 0x4b, 0xba, 0x34, 0xb4, + 0x96, 0xaa, 0x4e, 0x2d, 0xa4, 0x13, 0xaf, 0xc2, 0x72, 0xd2, 0x02, 0xd2, 0x5a, 0xb4, 0x2a, 0x76, + 0xe9, 0x4c, 0x47, 0x45, 0x55, 0x9d, 0x09, 0x4f, 0xb7, 0x01, 0x05, 0xbe, 0x7b, 0xfb, 0xa2, 0x38, + 0x97, 0x4d, 0xea, 0x08, 0xbe, 0x19, 0x49, 0x5f, 0xac, 0x92, 0xdf, 0x4e, 0x0c, 0x2f, 0xb9, 0xc4, + 0xf0, 0x82, 0x1e, 0x40, 0xd5, 0x3f, 0x0d, 0x86, 0x2b, 0x6a, 0x81, 0x92, 0x5e, 0x91, 0x1b, 0x9d, + 0xd2, 0x42, 0x4e, 0x2f, 0x84, 0x9d, 0x8e, 0x1e, 0x42, 0x9e, 0x4c, 0x88, 0xe5, 0xb9, 0x8d, 0x32, + 0xcb, 0x18, 0x55, 0x59, 0xbb, 0xb7, 0x29, 0x55, 0x17, 0x9d, 0xf8, 0xbb, 0xb0, 0xc8, 0xde, 0x91, + 0x9e, 0x39, 0x86, 0xa5, 0xbe, 0xcc, 0x75, 0xbb, 0x07, 0xc2, 0xdd, 0xf4, 0x11, 0xd5, 0x20, 0xb3, + 0xb7, 0x2b, 0x9c, 0x90, 0xd9, 0xdb, 0xc5, 0x3f, 0xd6, 0x00, 0xa9, 0xe3, 0x66, 0xf2, 0x73, 0x44, + 0xb8, 0x54, 0x9f, 0x0d, 0xd4, 0x2f, 0x43, 0x8e, 0x38, 0x8e, 0xed, 0x30, 0x8f, 0x96, 0x74, 0xde, + 0xc0, 0x6f, 0x0b, 0x1b, 0x74, 0x32, 0xb1, 0x2f, 0xfd, 0x33, 0xc8, 0xa5, 0x69, 0xbe, 0xa9, 0xfb, + 0xb0, 0x14, 0xe2, 0x9a, 0x29, 0x73, 0x7d, 0x04, 0x0b, 0x4c, 0xd8, 0xce, 0x05, 0xe9, 0x5d, 0x8e, + 0x6c, 0xd3, 0x8a, 0xe9, 0xa3, 0x2b, 0x17, 0x04, 0x58, 0x3a, 0x0f, 0x3e, 0xb1, 0x8a, 0x4f, 0xec, + 0x76, 0x0f, 0xf0, 0xe7, 0xb0, 0x1a, 0x91, 0x23, 0xcd, 0xff, 0x43, 0x28, 0xf7, 0x7c, 0xa2, 0x2b, + 0x6a, 0x9d, 0x7b, 0x61, 0xe3, 0xa2, 0x43, 0xd5, 0x11, 0xb8, 0x03, 0xb7, 0x62, 0xa2, 0x67, 0x9a, + 0xf3, 0xbb, 0xb0, 0xc2, 0x04, 0xee, 0x13, 0x32, 0xda, 0x1e, 0x98, 0x93, 0x54, 0x4f, 0x8f, 0xc4, + 0xa4, 0x14, 0xc6, 0xaf, 0x77, 0x5f, 0xe0, 0xdf, 0x17, 0x1a, 0xbb, 0xe6, 0x90, 0x74, 0xed, 0x83, + 0x74, 0xdb, 0x68, 0x36, 0xbb, 0x24, 0xd7, 0xae, 0x28, 0x6b, 0xd8, 0x33, 0xfe, 0x47, 0x4d, 0xb8, + 0x4a, 0x1d, 0xfe, 0x35, 0xef, 0xe4, 0x35, 0x80, 0x73, 0x7a, 0x64, 0x48, 0x9f, 0x76, 0x70, 0x44, + 0x45, 0xa1, 0xf8, 0x76, 0xd2, 0xf8, 0x5d, 0x11, 0x76, 0x2e, 0x8b, 0x7d, 0xce, 0xfe, 0xf8, 0x51, + 0xee, 0x1e, 0x94, 0x19, 0xe1, 0xd8, 0x33, 0xbc, 0xb1, 0x1b, 0x5b, 0x8c, 0x3f, 0x17, 0xdb, 0x5e, + 0x0e, 0x9a, 0x69, 0x5e, 0xdf, 0x82, 0x3c, 0x7b, 0x99, 0x90, 0xa5, 0xf4, 0xed, 0x84, 0xfd, 0xc8, + 0xed, 0xd0, 0x05, 0x23, 0xfe, 0xa9, 0x06, 0xf9, 0x17, 0x0c, 0x82, 0x55, 0x4c, 0x9b, 0x97, 0x6b, + 0x61, 0x19, 0x43, 0x0e, 0x0c, 0x95, 0x74, 0xf6, 0xcc, 0x4a, 0x4f, 0x42, 0x9c, 0x97, 0xfa, 0x01, + 0x2f, 0x71, 0x4b, 0xba, 0xdf, 0xa6, 0x3e, 0xeb, 0x0d, 0x4c, 0x62, 0x79, 0xac, 0x77, 0x9e, 0xf5, + 0x2a, 0x14, 0x5a, 0x3d, 0x9b, 0xee, 0x01, 0x31, 0x1c, 0x4b, 0x80, 0xa6, 0x45, 0x3d, 0x20, 0xe0, + 0x03, 0xa8, 0x73, 0x3b, 0xb6, 0xfb, 0x7d, 0xa5, 0xc0, 0xf4, 0xb5, 0x69, 0x11, 0x6d, 0x21, 0x69, + 0x99, 0xa8, 0xb4, 0x7f, 0xd2, 0x60, 0x51, 0x11, 0x37, 0x93, 0x57, 0xdf, 0x87, 0x3c, 0x07, 0xa9, + 0x45, 0xa5, 0xb3, 0x1c, 0x1e, 0xc5, 0xd5, 0xe8, 0x82, 0x07, 0x6d, 0x42, 0x81, 0x3f, 0xc9, 0x77, + 0x80, 0x64, 0x76, 0xc9, 0x84, 0x1f, 0xc2, 0x92, 0x20, 0x91, 0xa1, 0x9d, 0x74, 0x30, 0xd8, 0x62, + 0xe0, 0x3f, 0x85, 0xe5, 0x30, 0xdb, 0x4c, 0x53, 0x52, 0x8c, 0xcc, 0xbc, 0x89, 0x91, 0xdb, 0xd2, + 0xc8, 0x97, 0xa3, 0xbe, 0x52, 0x47, 0x45, 0x77, 0x8c, 0xba, 0x5e, 0x99, 0xf0, 0x7a, 0x05, 0x13, + 0x90, 0x22, 0xbe, 0xd1, 0x09, 0x2c, 0xc9, 0xed, 0x70, 0x60, 0xba, 0x7e, 0xb9, 
0xfe, 0x25, 0x20, + 0x95, 0xf8, 0x8d, 0x1a, 0xf4, 0x8e, 0x74, 0xc7, 0x91, 0x63, 0x0f, 0xed, 0x54, 0x97, 0xe2, 0x3f, + 0x83, 0x95, 0x08, 0xdf, 0x37, 0xed, 0xb7, 0x5d, 0x22, 0x8b, 0x15, 0xe9, 0xb7, 0x8f, 0x01, 0xa9, + 0xc4, 0x99, 0xb2, 0x56, 0x0b, 0x16, 0x5f, 0xd8, 0x13, 0x1a, 0xfe, 0x28, 0x35, 0x38, 0xf7, 0x1c, + 0x63, 0xf0, 0x5d, 0xe1, 0xb7, 0xa9, 0x72, 0x75, 0xc0, 0x4c, 0xca, 0xff, 0x43, 0x83, 0xca, 0xf6, + 0xc0, 0x70, 0x86, 0x52, 0xf1, 0xf7, 0x20, 0xcf, 0xdf, 0x9c, 0x05, 0x58, 0xf5, 0x4e, 0x58, 0x8c, + 0xca, 0xcb, 0x1b, 0xdb, 0xfc, 0x3d, 0x5b, 0x8c, 0xa2, 0x86, 0x8b, 0xef, 0x59, 0xbb, 0x91, 0xef, + 0x5b, 0xbb, 0xe8, 0x03, 0xc8, 0x19, 0x74, 0x08, 0x4b, 0x33, 0xb5, 0x28, 0x66, 0xc1, 0xa4, 0xb1, + 0xfa, 0x9e, 0x73, 0xe1, 0xef, 0x40, 0x59, 0xd1, 0x80, 0x0a, 0x90, 0x7d, 0xd6, 0x16, 0xc5, 0xf8, + 0xf6, 0x4e, 0x77, 0xef, 0x15, 0x07, 0x6b, 0x6a, 0x00, 0xbb, 0x6d, 0xbf, 0x9d, 0xc1, 0x9f, 0x89, + 0x51, 0x22, 0xa4, 0xab, 0xf6, 0x68, 0x69, 0xf6, 0x64, 0xde, 0xc8, 0x9e, 0x2b, 0xa8, 0x8a, 0xe9, + 0xcf, 0x9a, 0xa2, 0x98, 0xbc, 0x94, 0x14, 0xa5, 0x18, 0xaf, 0x0b, 0x46, 0xbc, 0x00, 0x55, 0x91, + 0xb4, 0xc4, 0xfe, 0xfb, 0xf7, 0x0c, 0xd4, 0x24, 0x65, 0x56, 0x50, 0x5d, 0xe2, 0x81, 0x3c, 0xc9, + 0xf9, 0x68, 0xe0, 0x2a, 0xe4, 0xfb, 0xa7, 0xc7, 0xe6, 0x97, 0xf2, 0x03, 0x88, 0x68, 0x51, 0xfa, + 0x80, 0xeb, 0xe1, 0x5f, 0x21, 0x45, 0x8b, 0x66, 0x23, 0xc7, 0x38, 0xf3, 0xf6, 0xac, 0x3e, 0xb9, + 0x62, 0xb9, 0x6d, 0x5e, 0x0f, 0x08, 0x0c, 0x28, 0x11, 0x5f, 0x2b, 0xd9, 0x0b, 0x82, 0xf2, 0xf5, + 0x12, 0x3d, 0x82, 0x3a, 0x7d, 0xde, 0x1e, 0x8d, 0x06, 0x26, 0xe9, 0x73, 0x01, 0x05, 0xc6, 0x13, + 0xa3, 0x53, 0xed, 0xac, 0xa4, 0x76, 0x1b, 0x45, 0x16, 0x5d, 0x45, 0x0b, 0xad, 0x43, 0x99, 0xdb, + 0xb7, 0x67, 0xbd, 0x74, 0x09, 0xfb, 0x84, 0x97, 0xd5, 0x55, 0x52, 0x38, 0x5b, 0x42, 0x34, 0x5b, + 0x2e, 0xc1, 0xe2, 0xf6, 0xd8, 0xbb, 0x68, 0x5b, 0xc6, 0xe9, 0x40, 0x46, 0x22, 0x5a, 0xce, 0x50, + 0xe2, 0xae, 0xe9, 0xaa, 0xd4, 0x36, 0x2c, 0x51, 0x2a, 0xb1, 0x3c, 0xb3, 0xa7, 0x64, 0x02, 0x59, + 0x2b, 0x68, 0x91, 0x5a, 0xc1, 0x70, 0xdd, 0xd7, 0xb6, 0xd3, 0x17, 0xee, 0xf5, 0xdb, 0x78, 0xc2, + 0x85, 0xbf, 0x74, 0x43, 0xf9, 0xfe, 0xd7, 0x94, 0x82, 0x3e, 0x84, 0x82, 0x3d, 0x62, 0x9f, 0xa4, + 0x05, 0x6e, 0xb0, 0xba, 0xc9, 0x3f, 0x62, 0x6f, 0x0a, 0xc1, 0x1d, 0xde, 0xab, 0x4b, 0x36, 0xbc, + 0x11, 0xe8, 0x7d, 0x46, 0xbc, 0x29, 0x7a, 0xf1, 0x63, 0x58, 0x91, 0x9c, 0x02, 0x26, 0x9f, 0xc2, + 0xdc, 0x81, 0x7b, 0x92, 0x79, 0xe7, 0xc2, 0xb0, 0xce, 0xc9, 0x91, 0x30, 0xf1, 0x37, 0xf5, 0xcf, + 0x53, 0x68, 0xf8, 0x76, 0xb2, 0x57, 0x37, 0x7b, 0xa0, 0x1a, 0x30, 0x76, 0xc5, 0x4e, 0x2f, 0xe9, + 0xec, 0x99, 0xd2, 0x1c, 0x7b, 0xe0, 0xd7, 0x6a, 0xf4, 0x19, 0xef, 0xc0, 0x6d, 0x29, 0x43, 0xbc, + 0x54, 0x85, 0x85, 0xc4, 0x0c, 0x4a, 0x12, 0x22, 0x1c, 0x46, 0x87, 0x4e, 0x5f, 0x28, 0x95, 0x33, + 0xec, 0x5a, 0x26, 0x53, 0x53, 0x64, 0xae, 0xf0, 0x3d, 0x44, 0x0d, 0x53, 0xd3, 0xb1, 0x20, 0x53, + 0x01, 0x2a, 0x59, 0x2c, 0x04, 0x25, 0xc7, 0x16, 0x22, 0x26, 0xfa, 0x07, 0xb0, 0xe6, 0x1b, 0x41, + 0xfd, 0x76, 0x44, 0x9c, 0xa1, 0xe9, 0xba, 0x0a, 0xb0, 0x9a, 0x34, 0xf1, 0x77, 0x60, 0x7e, 0x44, + 0x44, 0x24, 0x2c, 0x6f, 0x21, 0xb9, 0x89, 0x94, 0xc1, 0xac, 0x1f, 0xf7, 0xe1, 0xbe, 0x94, 0xce, + 0x3d, 0x9a, 0x28, 0x3e, 0x6a, 0x94, 0x84, 0x9b, 0x32, 0x29, 0x70, 0x53, 0x36, 0x02, 0xf6, 0x7f, + 0xcc, 0x1d, 0x29, 0x4f, 0xe3, 0x4c, 0x19, 0x6e, 0x9f, 0xfb, 0xd4, 0x3f, 0xc4, 0x33, 0x09, 0x3b, + 0x85, 0xe5, 0xf0, 0xd9, 0x9f, 0x29, 0xf8, 0x2e, 0x43, 0xce, 0xb3, 0x2f, 0x89, 0x0c, 0xbd, 0xbc, + 0x21, 0x0d, 0xf6, 0x03, 0xc3, 0x4c, 0x06, 0x1b, 0x81, 0x30, 0xb6, 0x25, 0x67, 0xb5, 0x97, 0xae, + 0xa6, 
0xac, 0x6c, 0x79, 0x03, 0x1f, 0xc2, 0x6a, 0x34, 0x4c, 0xcc, 0x64, 0xf2, 0x2b, 0xbe, 0x81, + 0x93, 0x22, 0xc9, 0x4c, 0x72, 0x3f, 0x09, 0x82, 0x81, 0x12, 0x50, 0x66, 0x12, 0xa9, 0x43, 0x33, + 0x29, 0xbe, 0xfc, 0x36, 0xf6, 0xab, 0x1f, 0x6e, 0x66, 0x12, 0xe6, 0x06, 0xc2, 0x66, 0x5f, 0xfe, + 0x20, 0x46, 0x64, 0xa7, 0xc6, 0x08, 0x71, 0x48, 0x82, 0x28, 0xf6, 0x35, 0x6c, 0x3a, 0xa1, 0x23, + 0x08, 0xa0, 0xb3, 0xea, 0xa0, 0x39, 0xc4, 0xd7, 0xc1, 0x1a, 0x72, 0x63, 0xab, 0x61, 0x77, 0xa6, + 0xc5, 0xf8, 0x34, 0x88, 0x9d, 0xb1, 0xc8, 0x3c, 0x93, 0xe0, 0xcf, 0x60, 0x3d, 0x3d, 0x28, 0xcf, + 0x22, 0xf9, 0x51, 0x0b, 0x4a, 0x7e, 0x19, 0xac, 0xdc, 0x22, 0x2a, 0x43, 0xe1, 0xb0, 0x73, 0x7c, + 0xb4, 0xbd, 0xd3, 0xe6, 0xd7, 0x88, 0x76, 0x3a, 0xba, 0xfe, 0xf2, 0xa8, 0x5b, 0xcf, 0x6c, 0xfd, + 0x32, 0x0b, 0x99, 0xfd, 0x57, 0xe8, 0x73, 0xc8, 0xf1, 0x6f, 0xea, 0x53, 0x2e, 0x52, 0x34, 0xa7, + 0x5d, 0x1b, 0xc0, 0xb7, 0x7e, 0xfc, 0xdf, 0xbf, 0xfc, 0x79, 0x66, 0x11, 0x57, 0x5a, 0x93, 0x6f, + 0xb7, 0x2e, 0x27, 0x2d, 0x96, 0x1b, 0x9e, 0x68, 0x8f, 0xd0, 0x27, 0x90, 0x3d, 0x1a, 0x7b, 0x28, + 0xf5, 0x82, 0x45, 0x33, 0xfd, 0x26, 0x01, 0x5e, 0x61, 0x42, 0x17, 0x30, 0x08, 0xa1, 0xa3, 0xb1, + 0x47, 0x45, 0xfe, 0x10, 0xca, 0xea, 0x3d, 0x80, 0x1b, 0x6f, 0x5d, 0x34, 0x6f, 0xbe, 0x63, 0x80, + 0xef, 0x31, 0x55, 0xb7, 0x30, 0x12, 0xaa, 0xf8, 0x4d, 0x05, 0x75, 0x16, 0xdd, 0x2b, 0x0b, 0xa5, + 0xde, 0xc9, 0x68, 0xa6, 0x5f, 0x3b, 0x88, 0xcd, 0xc2, 0xbb, 0xb2, 0xa8, 0xc8, 0x3f, 0x16, 0x37, + 0x0e, 0x7a, 0x1e, 0xba, 0x9f, 0xf0, 0xc5, 0x59, 0xfd, 0xb6, 0xda, 0x5c, 0x4f, 0x67, 0x10, 0x4a, + 0xee, 0x32, 0x25, 0xab, 0x78, 0x51, 0x28, 0xe9, 0xf9, 0x2c, 0x4f, 0xb4, 0x47, 0x5b, 0x3d, 0xc8, + 0xb1, 0xef, 0x16, 0xe8, 0x0b, 0xf9, 0xd0, 0x4c, 0xf8, 0x80, 0x93, 0xb2, 0xd0, 0xa1, 0x2f, 0x1e, + 0x78, 0x99, 0x29, 0xaa, 0xe1, 0x12, 0x55, 0xc4, 0xbe, 0x5a, 0x3c, 0xd1, 0x1e, 0x6d, 0x68, 0x1f, + 0x6a, 0x5b, 0xff, 0x9c, 0x83, 0x1c, 0x03, 0xec, 0xd0, 0x25, 0x40, 0x80, 0xe1, 0x47, 0x67, 0x17, + 0xfb, 0x2a, 0x10, 0x9d, 0x5d, 0x1c, 0xfe, 0xc7, 0x4d, 0xa6, 0x74, 0x19, 0x2f, 0x50, 0xa5, 0x0c, + 0x07, 0x6c, 0x31, 0x68, 0x93, 0xfa, 0xf1, 0xaf, 0x34, 0x81, 0x57, 0xf2, 0xb3, 0x84, 0x92, 0xa4, + 0x85, 0x80, 0xfc, 0xe8, 0x76, 0x48, 0x00, 0xf1, 0xf1, 0x77, 0x99, 0xc2, 0x16, 0xae, 0x07, 0x0a, + 0x1d, 0xc6, 0xf1, 0x44, 0x7b, 0xf4, 0x45, 0x03, 0x2f, 0x09, 0x2f, 0x47, 0x7a, 0xd0, 0x8f, 0xa0, + 0x16, 0x06, 0xaa, 0xd1, 0x83, 0x04, 0x5d, 0x51, 0xbc, 0xbb, 0xf9, 0xf6, 0x74, 0x26, 0x61, 0xd3, + 0x1a, 0xb3, 0x49, 0x28, 0xe7, 0x9a, 0x2f, 0x09, 0x19, 0x19, 0x94, 0x49, 0xac, 0x01, 0xfa, 0x7b, + 0x4d, 0x7c, 0x47, 0x08, 0x90, 0x67, 0x94, 0x24, 0x3d, 0x86, 0x6b, 0x37, 0x1f, 0xde, 0xc0, 0x25, + 0x8c, 0xf8, 0x03, 0x66, 0xc4, 0xef, 0xe2, 0xe5, 0xc0, 0x08, 0xcf, 0x1c, 0x12, 0xcf, 0x16, 0x56, + 0x7c, 0x71, 0x17, 0xdf, 0x0a, 0x39, 0x27, 0xd4, 0x1b, 0x2c, 0x16, 0x47, 0x8f, 0x13, 0x17, 0x2b, + 0x84, 0x46, 0x27, 0x2e, 0x56, 0x18, 0x7a, 0x4e, 0x5a, 0x2c, 0x8e, 0x15, 0x27, 0x2d, 0x96, 0xdf, + 0xb3, 0xf5, 0xff, 0xf3, 0x50, 0xd8, 0xe1, 0x37, 0x7d, 0x91, 0x0d, 0x25, 0x1f, 0x7c, 0x45, 0x6b, + 0x49, 0x08, 0x53, 0xf0, 0x2e, 0xd1, 0xbc, 0x9f, 0xda, 0x2f, 0x0c, 0x7a, 0x8b, 0x19, 0x74, 0x07, + 0xaf, 0x52, 0xcd, 0xe2, 0x32, 0x71, 0x8b, 0xc3, 0x18, 0x2d, 0xa3, 0xdf, 0xa7, 0x8e, 0xf8, 0x13, + 0xa8, 0xa8, 0xe8, 0x28, 0x7a, 0x2b, 0x11, 0xd5, 0x52, 0x01, 0xd6, 0x26, 0x9e, 0xc6, 0x22, 0x34, + 0xbf, 0xcd, 0x34, 0xaf, 0xe1, 0xdb, 0x09, 0x9a, 0x1d, 0xc6, 0x1a, 0x52, 0xce, 0x91, 0xcd, 0x64, + 0xe5, 0x21, 0xe0, 0x34, 0x59, 0x79, 0x18, 0x18, 0x9d, 0xaa, 0x7c, 0xcc, 0x58, 0xa9, 0x72, 0x17, + 0x20, 0xc0, 0x30, 0x51, 0xa2, 
0x2f, 0x95, 0x97, 0xa9, 0x68, 0x70, 0x88, 0xc3, 0x9f, 0x18, 0x33, + 0xb5, 0x62, 0xdf, 0x45, 0xd4, 0x0e, 0x4c, 0xd7, 0xe3, 0x07, 0xb3, 0x1a, 0x02, 0x25, 0x51, 0xe2, + 0x7c, 0xc2, 0xc8, 0x66, 0xf3, 0xc1, 0x54, 0x1e, 0xa1, 0xfd, 0x21, 0xd3, 0x7e, 0x1f, 0x37, 0x13, + 0xb4, 0x8f, 0x38, 0x2f, 0xdd, 0x6c, 0x7f, 0x9d, 0x87, 0xf2, 0x0b, 0xc3, 0xb4, 0x3c, 0x62, 0x19, + 0x56, 0x8f, 0xa0, 0x53, 0xc8, 0xb1, 0x4c, 0x1d, 0x0d, 0xc4, 0x2a, 0x60, 0x17, 0x0d, 0xc4, 0x21, + 0x34, 0x0b, 0xaf, 0x33, 0xc5, 0x4d, 0xbc, 0x42, 0x15, 0x0f, 0x03, 0xd1, 0x2d, 0x06, 0x42, 0xd1, + 0x49, 0x9f, 0x41, 0x5e, 0x7c, 0xc3, 0x89, 0x08, 0x0a, 0x81, 0x53, 0xcd, 0xbb, 0xc9, 0x9d, 0x49, + 0x7b, 0x59, 0x55, 0xe3, 0x32, 0x3e, 0xaa, 0x67, 0x02, 0x10, 0xa0, 0xab, 0xd1, 0x15, 0x8d, 0x81, + 0xb1, 0xcd, 0xf5, 0x74, 0x86, 0x24, 0x9f, 0xaa, 0x3a, 0xfb, 0x3e, 0x2f, 0xd5, 0xfb, 0x47, 0x30, + 0xff, 0xdc, 0x70, 0x2f, 0x50, 0x24, 0xf7, 0x2a, 0x37, 0x80, 0x9a, 0xcd, 0xa4, 0x2e, 0xa1, 0xe5, + 0x3e, 0xd3, 0x72, 0x9b, 0x87, 0x32, 0x55, 0xcb, 0x85, 0xe1, 0xd2, 0xa4, 0x86, 0xfa, 0x90, 0xe7, + 0x17, 0x82, 0xa2, 0xfe, 0x0b, 0x5d, 0x2a, 0x8a, 0xfa, 0x2f, 0x7c, 0x87, 0xe8, 0x66, 0x2d, 0x23, + 0x28, 0xca, 0x1b, 0x38, 0x28, 0xf2, 0x39, 0x36, 0x72, 0x5b, 0xa7, 0xb9, 0x96, 0xd6, 0x2d, 0x74, + 0x3d, 0x60, 0xba, 0xee, 0xe1, 0x46, 0x6c, 0xad, 0x04, 0xe7, 0x13, 0xed, 0xd1, 0x87, 0x1a, 0xfa, + 0x11, 0x40, 0x00, 0x48, 0xc7, 0x4e, 0x60, 0x14, 0xdb, 0x8e, 0x9d, 0xc0, 0x18, 0x96, 0x8d, 0x37, + 0x99, 0xde, 0x0d, 0xfc, 0x20, 0xaa, 0xd7, 0x73, 0x0c, 0xcb, 0x3d, 0x23, 0xce, 0x07, 0x1c, 0x74, + 0x74, 0x2f, 0xcc, 0x11, 0x3d, 0x0c, 0xff, 0xba, 0x00, 0xf3, 0xb4, 0x02, 0xa6, 0x85, 0x42, 0x00, + 0x1c, 0x44, 0x2d, 0x89, 0x01, 0x7c, 0x51, 0x4b, 0xe2, 0x98, 0x43, 0xb8, 0x50, 0x60, 0xbf, 0x11, + 0x21, 0x8c, 0x81, 0x3a, 0xda, 0x86, 0xb2, 0x82, 0x2c, 0xa0, 0x04, 0x61, 0x61, 0xe4, 0x30, 0x9a, + 0x7a, 0x12, 0x60, 0x09, 0x7c, 0x87, 0xe9, 0x5b, 0xe1, 0xa9, 0x87, 0xe9, 0xeb, 0x73, 0x0e, 0xaa, + 0xf0, 0x35, 0x54, 0x54, 0xf4, 0x01, 0x25, 0xc8, 0x8b, 0xa0, 0x92, 0xd1, 0x30, 0x9b, 0x04, 0x5e, + 0x84, 0x0f, 0xbe, 0xff, 0x3b, 0x18, 0xc9, 0x46, 0x15, 0x0f, 0xa0, 0x20, 0xe0, 0x88, 0xa4, 0x59, + 0x86, 0x21, 0xcc, 0xa4, 0x59, 0x46, 0xb0, 0x8c, 0x70, 0x71, 0xc9, 0x34, 0xd2, 0x37, 0x2e, 0x99, + 0xca, 0x84, 0xb6, 0x67, 0xc4, 0x4b, 0xd3, 0x16, 0xa0, 0x6b, 0x69, 0xda, 0x94, 0xb7, 0xdd, 0x34, + 0x6d, 0xe7, 0xc4, 0x13, 0xc7, 0x45, 0xbe, 0x45, 0xa2, 0x14, 0x61, 0x6a, 0xfa, 0xc0, 0xd3, 0x58, + 0x92, 0x6a, 0xff, 0x40, 0xa1, 0xcc, 0x1d, 0x57, 0x00, 0x01, 0x58, 0x12, 0x2d, 0xe8, 0x12, 0x11, + 0xd7, 0x68, 0x41, 0x97, 0x8c, 0xb7, 0x84, 0x43, 0x43, 0xa0, 0x97, 0xbf, 0x7a, 0x50, 0xcd, 0x3f, + 0xd3, 0x00, 0xc5, 0x71, 0x15, 0xf4, 0x38, 0x59, 0x7a, 0x22, 0x8e, 0xdb, 0x7c, 0xff, 0xcd, 0x98, + 0x93, 0xa2, 0x7d, 0x60, 0x52, 0x8f, 0x71, 0x8f, 0x5e, 0x53, 0xa3, 0xfe, 0x42, 0x83, 0x6a, 0x08, + 0x94, 0x41, 0xef, 0xa4, 0xac, 0x69, 0x04, 0x06, 0x6e, 0xbe, 0x7b, 0x23, 0x5f, 0x52, 0xa5, 0xab, + 0xec, 0x00, 0x59, 0xf2, 0xff, 0x44, 0x83, 0x5a, 0x18, 0xc4, 0x41, 0x29, 0xb2, 0x63, 0x30, 0x72, + 0x73, 0xe3, 0x66, 0xc6, 0xe9, 0xcb, 0x13, 0x54, 0xfb, 0x03, 0x28, 0x08, 0xd8, 0x27, 0x69, 0xe3, + 0x87, 0x01, 0xe8, 0xa4, 0x8d, 0x1f, 0xc1, 0x8c, 0x12, 0x36, 0xbe, 0x63, 0x0f, 0x88, 0x72, 0xcc, + 0x04, 0x2e, 0x94, 0xa6, 0x6d, 0xfa, 0x31, 0x8b, 0x80, 0x4a, 0x69, 0xda, 0x82, 0x63, 0x26, 0x01, + 0x21, 0x94, 0x22, 0xec, 0x86, 0x63, 0x16, 0xc5, 0x93, 0x12, 0x8e, 0x19, 0x53, 0xa8, 0x1c, 0xb3, + 0x00, 0xba, 0x49, 0x3a, 0x66, 0x31, 0x3c, 0x3d, 0xe9, 0x98, 0xc5, 0xd1, 0x9f, 0x84, 0x75, 0x64, + 0x7a, 0x43, 0xc7, 0x6c, 0x29, 0x01, 0xe5, 0x41, 0xef, 
0xa7, 0x38, 0x31, 0x11, 0xa6, 0x6f, 0x7e, + 0xf0, 0x86, 0xdc, 0xa9, 0x7b, 0x9c, 0xbb, 0x5f, 0xee, 0xf1, 0xbf, 0xd1, 0x60, 0x39, 0x09, 0x21, + 0x42, 0x29, 0x7a, 0x52, 0xe0, 0xfd, 0xe6, 0xe6, 0x9b, 0xb2, 0x4f, 0xf7, 0x96, 0xbf, 0xeb, 0x9f, + 0xd6, 0xff, 0xed, 0xab, 0x35, 0xed, 0x3f, 0xbf, 0x5a, 0xd3, 0xfe, 0xe7, 0xab, 0x35, 0xed, 0x6f, + 0xff, 0x77, 0x6d, 0xee, 0x34, 0xcf, 0x7e, 0x5d, 0xf9, 0xed, 0x5f, 0x05, 0x00, 0x00, 0xff, 0xff, + 0x52, 0x4e, 0xd7, 0x33, 0xe4, 0x39, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto new file mode 100644 index 0000000000..423eabada4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto @@ -0,0 +1,1146 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcd/mvcc/mvccpb/kv.proto"; +import "etcd/auth/authpb/auth.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service KV { + // Range gets the keys in the range from the key-value store. + rpc Range(RangeRequest) returns (RangeResponse) { + option (google.api.http) = { + post: "/v3/kv/range" + body: "*" + }; + } + + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + rpc Put(PutRequest) returns (PutResponse) { + option (google.api.http) = { + post: "/v3/kv/put" + body: "*" + }; + } + + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { + option (google.api.http) = { + post: "/v3/kv/deleterange" + body: "*" + }; + } + + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + rpc Txn(TxnRequest) returns (TxnResponse) { + option (google.api.http) = { + post: "/v3/kv/txn" + body: "*" + }; + } + + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + rpc Compact(CompactionRequest) returns (CompactionResponse) { + option (google.api.http) = { + post: "/v3/kv/compaction" + body: "*" + }; + } +} + +service Watch { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + rpc Watch(stream WatchRequest) returns (stream WatchResponse) { + option (google.api.http) = { + post: "/v3/watch" + body: "*" + }; + } +} + +service Lease { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. 
+ rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { + option (google.api.http) = { + post: "/v3/lease/grant" + body: "*" + }; + } + + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { + option (google.api.http) = { + post: "/v3/lease/revoke" + body: "*" + additional_bindings { + post: "/v3/kv/lease/revoke" + body: "*" + } + }; + } + + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { + option (google.api.http) = { + post: "/v3/lease/keepalive" + body: "*" + }; + } + + // LeaseTimeToLive retrieves lease information. + rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { + option (google.api.http) = { + post: "/v3/lease/timetolive" + body: "*" + additional_bindings { + post: "/v3/kv/lease/timetolive" + body: "*" + } + }; + } + + // LeaseLeases lists all existing leases. + rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { + option (google.api.http) = { + post: "/v3/lease/leases" + body: "*" + additional_bindings { + post: "/v3/kv/lease/leases" + body: "*" + } + }; + } +} + +service Cluster { + // MemberAdd adds a member into the cluster. + rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/add" + body: "*" + }; + } + + // MemberRemove removes an existing member from the cluster. + rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/remove" + body: "*" + }; + } + + // MemberUpdate updates the member configuration. + rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/update" + body: "*" + }; + } + + // MemberList lists all the members in the cluster. + rpc MemberList(MemberListRequest) returns (MemberListResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/list" + body: "*" + }; + } + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/promote" + body: "*" + }; + } +} + +service Maintenance { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + rpc Alarm(AlarmRequest) returns (AlarmResponse) { + option (google.api.http) = { + post: "/v3/maintenance/alarm" + body: "*" + }; + } + + // Status gets the status of the member. + rpc Status(StatusRequest) returns (StatusResponse) { + option (google.api.http) = { + post: "/v3/maintenance/status" + body: "*" + }; + } + + // Defragment defragments a member's backend database to recover storage space. + rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { + option (google.api.http) = { + post: "/v3/maintenance/defragment" + body: "*" + }; + } + + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. 
+ rpc Hash(HashRequest) returns (HashResponse) { + option (google.api.http) = { + post: "/v3/maintenance/hash" + body: "*" + }; + } + + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + rpc HashKV(HashKVRequest) returns (HashKVResponse) { + option (google.api.http) = { + post: "/v3/maintenance/hash" + body: "*" + }; + } + + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { + option (google.api.http) = { + post: "/v3/maintenance/snapshot" + body: "*" + }; + } + + // MoveLeader requests current leader node to transfer its leadership to transferee. + rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { + option (google.api.http) = { + post: "/v3/maintenance/transfer-leadership" + body: "*" + }; + } +} + +service Auth { + // AuthEnable enables authentication. + rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { + option (google.api.http) = { + post: "/v3/auth/enable" + body: "*" + }; + } + + // AuthDisable disables authentication. + rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { + option (google.api.http) = { + post: "/v3/auth/disable" + body: "*" + }; + } + + // Authenticate processes an authenticate request. + rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { + option (google.api.http) = { + post: "/v3/auth/authenticate" + body: "*" + }; + } + + // UserAdd adds a new user. User name cannot be empty. + rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { + option (google.api.http) = { + post: "/v3/auth/user/add" + body: "*" + }; + } + + // UserGet gets detailed user information. + rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { + option (google.api.http) = { + post: "/v3/auth/user/get" + body: "*" + }; + } + + // UserList gets a list of all users. + rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { + option (google.api.http) = { + post: "/v3/auth/user/list" + body: "*" + }; + } + + // UserDelete deletes a specified user. + rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { + option (google.api.http) = { + post: "/v3/auth/user/delete" + body: "*" + }; + } + + // UserChangePassword changes the password of a specified user. + rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { + option (google.api.http) = { + post: "/v3/auth/user/changepw" + body: "*" + }; + } + + // UserGrant grants a role to a specified user. + rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { + option (google.api.http) = { + post: "/v3/auth/user/grant" + body: "*" + }; + } + + // UserRevokeRole revokes a role of specified user. + rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { + option (google.api.http) = { + post: "/v3/auth/user/revoke" + body: "*" + }; + } + + // RoleAdd adds a new role. Role name cannot be empty. + rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { + option (google.api.http) = { + post: "/v3/auth/role/add" + body: "*" + }; + } + + // RoleGet gets detailed role information. + rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { + option (google.api.http) = { + post: "/v3/auth/role/get" + body: "*" + }; + } + + // RoleList gets lists of all roles. 
+ rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { + option (google.api.http) = { + post: "/v3/auth/role/list" + body: "*" + }; + } + + // RoleDelete deletes a specified role. + rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { + option (google.api.http) = { + post: "/v3/auth/role/delete" + body: "*" + }; + } + + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { + option (google.api.http) = { + post: "/v3/auth/role/grant" + body: "*" + }; + } + + // RoleRevokePermission revokes a key or range permission of a specified role. + rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { + option (google.api.http) = { + post: "/v3/auth/role/revoke" + body: "*" + }; + } +} + +message ResponseHeader { + // cluster_id is the ID of the cluster which sent the response. + uint64 cluster_id = 1; + // member_id is the ID of the member which sent the response. + uint64 member_id = 2; + // revision is the key-value store revision when the request was applied. + // For watch progress responses, the header.revision indicates progress. All future events + // recieved in this stream are guaranteed to have a higher revision number than the + // header.revision number. + int64 revision = 3; + // raft_term is the raft term when the request was applied. + uint64 raft_term = 4; +} + +message RangeRequest { + enum SortOrder { + NONE = 0; // default, no sorting + ASCEND = 1; // lowest target value first + DESCEND = 2; // highest target value first + } + enum SortTarget { + KEY = 0; + VERSION = 1; + CREATE = 2; + MOD = 3; + VALUE = 4; + } + + // key is the first key for the range. If range_end is not given, the request only looks up key. + bytes key = 1; + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. + bytes range_end = 2; + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + int64 limit = 3; + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + int64 revision = 4; + + // sort_order is the order for returned sorted results. + SortOrder sort_order = 5; + + // sort_target is the key-value field to use for sorting. + SortTarget sort_target = 6; + + // serializable sets the range request to use serializable member-local reads. + // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + bool serializable = 7; + + // keys_only when set returns only the keys and not the values. + bool keys_only = 8; + + // count_only when set returns only the count of the keys in the range. 
+ bool count_only = 9; + + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + int64 min_mod_revision = 10; + + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + int64 max_mod_revision = 11; + + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + int64 min_create_revision = 12; + + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. + int64 max_create_revision = 13; +} + +message RangeResponse { + ResponseHeader header = 1; + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + repeated mvccpb.KeyValue kvs = 2; + // more indicates if there are more keys to return in the requested range. + bool more = 3; + // count is set to the number of keys within the range when requested. + int64 count = 4; +} + +message PutRequest { + // key is the key, in bytes, to put into the key-value store. + bytes key = 1; + // value is the value, in bytes, to associate with the key in the key-value store. + bytes value = 2; + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + int64 lease = 3; + + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + bool prev_kv = 4; + + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + bool ignore_value = 5; + + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + bool ignore_lease = 6; +} + +message PutResponse { + ResponseHeader header = 1; + // if prev_kv is set in the request, the previous key-value pair will be returned. + mvccpb.KeyValue prev_kv = 2; +} + +message DeleteRangeRequest { + // key is the first key to delete in the range. + bytes key = 1; + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + bytes range_end = 2; + + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delete response. + bool prev_kv = 3; +} + +message DeleteRangeResponse { + ResponseHeader header = 1; + // deleted is the number of keys deleted by the delete range request. + int64 deleted = 2; + // if prev_kv is set in the request, the previous key-value pairs will be returned. + repeated mvccpb.KeyValue prev_kvs = 3; +} + +message RequestOp { + // request is a union of request types accepted by a transaction. + oneof request { + RangeRequest request_range = 1; + PutRequest request_put = 2; + DeleteRangeRequest request_delete_range = 3; + TxnRequest request_txn = 4; + } +} + +message ResponseOp { + // response is a union of response types returned by a transaction. 
+ oneof response { + RangeResponse response_range = 1; + PutResponse response_put = 2; + DeleteRangeResponse response_delete_range = 3; + TxnResponse response_txn = 4; + } +} + +message Compare { + enum CompareResult { + EQUAL = 0; + GREATER = 1; + LESS = 2; + NOT_EQUAL = 3; + } + enum CompareTarget { + VERSION = 0; + CREATE = 1; + MOD = 2; + VALUE = 3; + LEASE = 4; + } + // result is logical comparison operation for this comparison. + CompareResult result = 1; + // target is the key-value field to inspect for the comparison. + CompareTarget target = 2; + // key is the subject key for the comparison operation. + bytes key = 3; + oneof target_union { + // version is the version of the given key + int64 version = 4; + // create_revision is the creation revision of the given key + int64 create_revision = 5; + // mod_revision is the last modified revision of the given key. + int64 mod_revision = 6; + // value is the value of the given key, in bytes. + bytes value = 7; + // lease is the lease id of the given key. + int64 lease = 8; + // leave room for more target_union field tags, jump to 64 + } + + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + bytes range_end = 64; + // TODO: fill out with most of the rest of RangeRequest fields when needed. +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +message TxnRequest { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + repeated Compare compare = 1; + // success is a list of requests which will be applied when compare evaluates to true. + repeated RequestOp success = 2; + // failure is a list of requests which will be applied when compare evaluates to false. + repeated RequestOp failure = 3; +} + +message TxnResponse { + ResponseHeader header = 1; + // succeeded is set to true if the compare evaluated to true or false otherwise. + bool succeeded = 2; + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. 
+ repeated ResponseOp responses = 3; +} + +// CompactionRequest compacts the key-value store up to a given revision. All superseded keys +// with a revision less than the compaction revision will be removed. +message CompactionRequest { + // revision is the key-value store revision for the compaction operation. + int64 revision = 1; + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + bool physical = 2; +} + +message CompactionResponse { + ResponseHeader header = 1; +} + +message HashRequest { +} + +message HashKVRequest { + // revision is the key-value store revision for the hash operation. + int64 revision = 1; +} + +message HashKVResponse { + ResponseHeader header = 1; + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. + uint32 hash = 2; + // compact_revision is the compacted revision of key-value store when hash begins. + int64 compact_revision = 3; +} + +message HashResponse { + ResponseHeader header = 1; + // hash is the hash value computed from the responding member's KV's backend. + uint32 hash = 2; +} + +message SnapshotRequest { +} + +message SnapshotResponse { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + ResponseHeader header = 1; + + // remaining_bytes is the number of blob bytes to be sent after this message + uint64 remaining_bytes = 2; + + // blob contains the next chunk of the snapshot in the snapshot stream. + bytes blob = 3; +} + +message WatchRequest { + // request_union is a request to either create a new watcher or cancel an existing watcher. + oneof request_union { + WatchCreateRequest create_request = 1; + WatchCancelRequest cancel_request = 2; + WatchProgressRequest progress_request = 3; + } +} + +message WatchCreateRequest { + // key is the key to register for watching. + bytes key = 1; + + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + bytes range_end = 2; + + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + int64 start_revision = 3; + + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. + bool progress_notify = 4; + + enum FilterType { + // filter out put event. + NOPUT = 0; + // filter out delete event. + NODELETE = 1; + } + + // filters filter the events at server side before it sends back to the watcher. + repeated FilterType filters = 5; + + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + bool prev_kv = 6; + + // If watch_id is provided and non-zero, it will be assigned to this watcher. 
+ // Since creating a watcher in etcd is not a synchronous operation, + // this can be used ensure that ordering is correct when creating multiple + // watchers on the same stream. Creating a watcher with an ID already in + // use on the stream will cause an error to be returned. + int64 watch_id = 7; + + // fragment enables splitting large revisions into multiple watch responses. + bool fragment = 8; +} + +message WatchCancelRequest { + // watch_id is the watcher id to cancel so that no more events are transmitted. + int64 watch_id = 1; +} + +// Requests the a watch stream progress status be sent in the watch response stream as soon as +// possible. +message WatchProgressRequest { +} + +message WatchResponse { + ResponseHeader header = 1; + // watch_id is the ID of the watcher that corresponds to the response. + int64 watch_id = 2; + + // created is set to true if the response is for a create watch request. + // The client should record the watch_id and expect to receive events for + // the created watcher from the same stream. + // All events sent to the created watcher will attach with the same watch_id. + bool created = 3; + + // canceled is set to true if the response is for a cancel watch request. + // No further events will be sent to the canceled watcher. + bool canceled = 4; + + // compact_revision is set to the minimum index if a watcher tries to watch + // at a compacted index. + // + // This happens when creating a watcher at a compacted revision or the watcher cannot + // catch up with the progress of the key-value store. + // + // The client should treat the watcher as canceled and should not try to create any + // watcher with the same start_revision again. + int64 compact_revision = 5; + + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + + // framgment is true if large watch response was split over multiple responses. + bool fragment = 7; + + repeated mvccpb.Event events = 11; +} + +message LeaseGrantRequest { + // TTL is the advisory time-to-live in seconds. Expired lease will return -1. + int64 TTL = 1; + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + int64 ID = 2; +} + +message LeaseGrantResponse { + ResponseHeader header = 1; + // ID is the lease ID for the granted lease. + int64 ID = 2; + // TTL is the server chosen lease time-to-live in seconds. + int64 TTL = 3; + string error = 4; +} + +message LeaseRevokeRequest { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. + int64 ID = 1; +} + +message LeaseRevokeResponse { + ResponseHeader header = 1; +} + +message LeaseCheckpoint { + // ID is the lease ID to checkpoint. + int64 ID = 1; + + // Remaining_TTL is the remaining time until expiry of the lease. + int64 remaining_TTL = 2; +} + +message LeaseCheckpointRequest { + repeated LeaseCheckpoint checkpoints = 1; +} + +message LeaseCheckpointResponse { + ResponseHeader header = 1; +} + +message LeaseKeepAliveRequest { + // ID is the lease ID for the lease to keep alive. + int64 ID = 1; +} + +message LeaseKeepAliveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the new time-to-live for the lease. + int64 TTL = 3; +} + +message LeaseTimeToLiveRequest { + // ID is the lease ID for the lease. + int64 ID = 1; + // keys is true to query all the keys attached to this lease. 
+ bool keys = 2; +} + +message LeaseTimeToLiveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + int64 TTL = 3; + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + int64 grantedTTL = 4; + // Keys is the list of keys attached to this lease. + repeated bytes keys = 5; +} + +message LeaseLeasesRequest { +} + +message LeaseStatus { + int64 ID = 1; + // TODO: int64 TTL = 2; +} + +message LeaseLeasesResponse { + ResponseHeader header = 1; + repeated LeaseStatus leases = 2; +} + +message Member { + // ID is the member ID for this member. + uint64 ID = 1; + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + string name = 2; + // peerURLs is the list of URLs the member exposes to the cluster for communication. + repeated string peerURLs = 3; + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. + repeated string clientURLs = 4; + // isLearner indicates if the member is raft learner. + bool isLearner = 5; +} + +message MemberAddRequest { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + repeated string peerURLs = 1; + // isLearner indicates if the added member is raft learner. + bool isLearner = 2; +} + +message MemberAddResponse { + ResponseHeader header = 1; + // member is the member information for the added member. + Member member = 2; + // members is a list of all members after adding the new member. + repeated Member members = 3; +} + +message MemberRemoveRequest { + // ID is the member ID of the member to remove. + uint64 ID = 1; +} + +message MemberRemoveResponse { + ResponseHeader header = 1; + // members is a list of all members after removing the member. + repeated Member members = 2; +} + +message MemberUpdateRequest { + // ID is the member ID of the member to update. + uint64 ID = 1; + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + repeated string peerURLs = 2; +} + +message MemberUpdateResponse{ + ResponseHeader header = 1; + // members is a list of all members after updating the member. + repeated Member members = 2; +} + +message MemberListRequest { +} + +message MemberListResponse { + ResponseHeader header = 1; + // members is a list of all members associated with the cluster. + repeated Member members = 2; +} + +message MemberPromoteRequest { + // ID is the member ID of the member to promote. + uint64 ID = 1; +} + +message MemberPromoteResponse { + ResponseHeader header = 1; + // members is a list of all members after promoting the member. + repeated Member members = 2; +} + +message DefragmentRequest { +} + +message DefragmentResponse { + ResponseHeader header = 1; +} + +message MoveLeaderRequest { + // targetID is the node ID for the new leader. + uint64 targetID = 1; +} + +message MoveLeaderResponse { + ResponseHeader header = 1; +} + +enum AlarmType { + NONE = 0; // default, used to query if any alarm is active + NOSPACE = 1; // space quota is exhausted + CORRUPT = 2; // kv store corruption detected +} + +message AlarmRequest { + enum AlarmAction { + GET = 0; + ACTIVATE = 1; + DEACTIVATE = 2; + } + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. 
+ AlarmAction action = 1; + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + uint64 memberID = 2; + // alarm is the type of alarm to consider for this request. + AlarmType alarm = 3; +} + +message AlarmMember { + // memberID is the ID of the member associated with the raised alarm. + uint64 memberID = 1; + // alarm is the type of alarm which has been raised. + AlarmType alarm = 2; +} + +message AlarmResponse { + ResponseHeader header = 1; + // alarms is a list of alarms associated with the alarm request. + repeated AlarmMember alarms = 2; +} + +message StatusRequest { +} + +message StatusResponse { + ResponseHeader header = 1; + // version is the cluster protocol version used by the responding member. + string version = 2; + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. + int64 dbSize = 3; + // leader is the member ID which the responding member believes is the current leader. + uint64 leader = 4; + // raftIndex is the current raft committed index of the responding member. + uint64 raftIndex = 5; + // raftTerm is the current raft term of the responding member. + uint64 raftTerm = 6; + // raftAppliedIndex is the current raft applied index of the responding member. + uint64 raftAppliedIndex = 7; + // errors contains alarm/health information and status. + repeated string errors = 8; + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + int64 dbSizeInUse = 9; + // isLearner indicates if the member is raft learner. + bool isLearner = 10; +} + +message AuthEnableRequest { +} + +message AuthDisableRequest { +} + +message AuthenticateRequest { + string name = 1; + string password = 2; +} + +message AuthUserAddRequest { + string name = 1; + string password = 2; + authpb.UserAddOptions options = 3; +} + +message AuthUserGetRequest { + string name = 1; +} + +message AuthUserDeleteRequest { + // name is the name of the user to delete. + string name = 1; +} + +message AuthUserChangePasswordRequest { + // name is the name of the user whose password is being changed. + string name = 1; + // password is the new password for the user. + string password = 2; +} + +message AuthUserGrantRoleRequest { + // user is the name of the user which should be granted a given role. + string user = 1; + // role is the name of the role to grant to the user. + string role = 2; +} + +message AuthUserRevokeRoleRequest { + string name = 1; + string role = 2; +} + +message AuthRoleAddRequest { + // name is the name of the role to add to the authentication system. + string name = 1; +} + +message AuthRoleGetRequest { + string role = 1; +} + +message AuthUserListRequest { +} + +message AuthRoleListRequest { +} + +message AuthRoleDeleteRequest { + string role = 1; +} + +message AuthRoleGrantPermissionRequest { + // name is the name of the role which will be granted the permission. + string name = 1; + // perm is the permission to grant to the role. 
+ authpb.Permission perm = 2; +} + +message AuthRoleRevokePermissionRequest { + string role = 1; + bytes key = 2; + bytes range_end = 3; +} + +message AuthEnableResponse { + ResponseHeader header = 1; +} + +message AuthDisableResponse { + ResponseHeader header = 1; +} + +message AuthenticateResponse { + ResponseHeader header = 1; + // token is an authorized token that can be used in succeeding RPCs + string token = 2; +} + +message AuthUserAddResponse { + ResponseHeader header = 1; +} + +message AuthUserGetResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserDeleteResponse { + ResponseHeader header = 1; +} + +message AuthUserChangePasswordResponse { + ResponseHeader header = 1; +} + +message AuthUserGrantRoleResponse { + ResponseHeader header = 1; +} + +message AuthUserRevokeRoleResponse { + ResponseHeader header = 1; +} + +message AuthRoleAddResponse { + ResponseHeader header = 1; +} + +message AuthRoleGetResponse { + ResponseHeader header = 1; + + repeated authpb.Permission perm = 2; +} + +message AuthRoleListResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserListResponse { + ResponseHeader header = 1; + + repeated string users = 2; +} + +message AuthRoleDeleteResponse { + ResponseHeader header = 1; +} + +message AuthRoleGrantPermissionResponse { + ResponseHeader header = 1; +} + +message AuthRoleRevokePermissionResponse { + ResponseHeader header = 1; +} diff --git a/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go new file mode 100644 index 0000000000..23fe337a59 --- /dev/null +++ b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go @@ -0,0 +1,718 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kv.proto + +/* + Package mvccpb is a generated protocol buffer package. + + It is generated from these files: + kv.proto + + It has these top-level messages: + KeyValue + Event +*/ +package mvccpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Event_EventType int32 + +const ( + PUT Event_EventType = 0 + DELETE Event_EventType = 1 +) + +var Event_EventType_name = map[int32]string{ + 0: "PUT", + 1: "DELETE", +} +var Event_EventType_value = map[string]int32{ + "PUT": 0, + "DELETE": 1, +} + +func (x Event_EventType) String() string { + return proto.EnumName(Event_EventType_name, int32(x)) +} +func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } + +type KeyValue struct { + // key is the key in bytes. An empty key is not allowed. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // create_revision is the revision of last creation on this key. + CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` + // mod_revision is the revision of last modification on this key. 
+ ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + // value is the value held by the key, in bytes. + Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } + +type Event struct { + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` + // prev_kv holds the key-value pair before the event happens. + PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } + +func init() { + proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") + proto.RegisterType((*Event)(nil), "mvccpb.Event") + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) +} +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.CreateRevision != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) + } + if m.Version != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Version)) + } + if len(m.Value) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + 
dAtA[i] = 0x8 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Type)) + } + if m.Kv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) + n1, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PrevKv != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) + n2, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *KeyValue) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.CreateRevision != 0 { + n += 1 + sovKv(uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + n += 1 + sovKv(uint64(m.ModRevision)) + } + if m.Version != 0 { + n += 1 + sovKv(uint64(m.Version)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovKv(uint64(m.Lease)) + } + return n +} + +func (m *Event) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovKv(uint64(m.Type)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func sovKv(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozKv(x uint64) (n int) { + return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + m.CreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + m.ModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Event_EventType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKv(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthKv + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipKv(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } + 
+var fileDescriptorKv = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto new file mode 100644 index 0000000000..23c911b7da --- /dev/null +++ b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; +package mvccpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message KeyValue { + // key is the key in bytes. An empty key is not allowed. + bytes key = 1; + // create_revision is the revision of last creation on this key. + int64 create_revision = 2; + // mod_revision is the revision of last modification on this key. + int64 mod_revision = 3; + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + int64 version = 4; + // value is the value held by the key, in bytes. + bytes value = 5; + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + int64 lease = 6; +} + +message Event { + enum EventType { + PUT = 0; + DELETE = 1; + } + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + EventType type = 1; + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. 
+ // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + KeyValue kv = 2; + + // prev_kv holds the key-value pair before the event happens. + KeyValue prev_kv = 3; +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go new file mode 100644 index 0000000000..4ce15dc6bc --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go @@ -0,0 +1,27 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package fileutil + +import "os" + +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0700 +) + +// OpenDir opens a directory for syncing. +func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go new file mode 100644 index 0000000000..a10a90583c --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "os" + "syscall" +) + +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0777 +) + +// OpenDir opens a directory in windows with write access for syncing. 
+func OpenDir(path string) (*os.File, error) { + fd, err := openDir(path) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func openDir(path string) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + createmode := uint32(syscall.OPEN_EXISTING) + fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go new file mode 100644 index 0000000000..69dde5a7dd --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fileutil implements utility functions related to files and paths. +package fileutil diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go new file mode 100644 index 0000000000..f36136182b --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go @@ -0,0 +1,129 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/coreos/pkg/capnslog" +) + +const ( + // PrivateFileMode grants owner to read/write a file. + PrivateFileMode = 0600 +) + +var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/fileutil") + +// IsDirWriteable checks if dir is writable by writing and removing a file +// to dir. It returns nil if dir is writable. +func IsDirWriteable(dir string) error { + f := filepath.Join(dir, ".touch") + if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { + return err + } + return os.Remove(f) +} + +// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory +// does not exists. TouchDirAll also ensures the given directory is writable. +func TouchDirAll(dir string) error { + // If path is already a directory, MkdirAll does nothing and returns nil, so, + // first check if dir exist with an expected permission mode. 
+ if Exist(dir) { + err := CheckDirPermission(dir, PrivateDirMode) + if err != nil { + plog.Warningf("check file permission: %v", err) + } + } else { + err := os.MkdirAll(dir, PrivateDirMode) + if err != nil { + // if mkdirAll("a/text") and "text" is not + // a directory, this will return syscall.ENOTDIR + return err + } + } + + return IsDirWriteable(dir) +} + +// CreateDirAll is similar to TouchDirAll but returns error +// if the deepest directory was not empty. +func CreateDirAll(dir string) error { + err := TouchDirAll(dir) + if err == nil { + var ns []string + ns, err = ReadDir(dir) + if err != nil { + return err + } + if len(ns) != 0 { + err = fmt.Errorf("expected %q to be empty, got %q", dir, ns) + } + } + return err +} + +// Exist returns true if a file or directory exists. +func Exist(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily +// shorten the length of the file. +func ZeroToEnd(f *os.File) error { + // TODO: support FALLOC_FL_ZERO_RANGE + off, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + lenf, lerr := f.Seek(0, io.SeekEnd) + if lerr != nil { + return lerr + } + if err = f.Truncate(off); err != nil { + return err + } + // make sure blocks remain allocated + if err = Preallocate(f, lenf, true); err != nil { + return err + } + _, err = f.Seek(off, io.SeekStart) + return err +} + +// CheckDirPermission checks permission on an existing dir. +// Returns error if dir is empty or exist with a different permission than specified. +func CheckDirPermission(dir string, perm os.FileMode) error { + if !Exist(dir) { + return fmt.Errorf("directory %q empty, cannot check permission.", dir) + } + //check the existing permission on the directory + dirInfo, err := os.Stat(dir) + if err != nil { + return err + } + dirMode := dirInfo.Mode().Perm() + if dirMode != perm { + err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data.", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode)) + return err + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go new file mode 100644 index 0000000000..338627f43c --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go @@ -0,0 +1,26 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
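As a usage sketch for the directory helpers in fileutil.go above (illustrative only; the data-directory path and the package main wrapper are assumptions, not part of the vendored code):

    package main

    import (
        "log"

        "go.etcd.io/etcd/pkg/fileutil"
    )

    func main() {
        dir := "/var/lib/example" // hypothetical data directory
        // Creates the directory with PrivateDirMode if missing and verifies it is writable.
        if err := fileutil.TouchDirAll(dir); err != nil {
            log.Fatalf("cannot prepare %q: %v", dir, err)
        }
        // Flags an existing directory whose mode differs from the recommended one.
        if err := fileutil.CheckDirPermission(dir, fileutil.PrivateDirMode); err != nil {
            log.Printf("permission check: %v", err)
        }
    }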
+ +package fileutil + +import ( + "errors" + "os" +) + +var ( + ErrLocked = errors.New("fileutil: file already locked") +) + +type LockedFile struct{ *os.File } diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go new file mode 100644 index 0000000000..542550bc8a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows,!plan9,!solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go new file mode 100644 index 0000000000..b0abc98eeb --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go @@ -0,0 +1,97 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "fmt" + "io" + "os" + "syscall" +) + +// This used to call syscall.Flock() but that call fails with EBADF on NFS. +// An alternative is lockf() which works on NFS but that call lets a process lock +// the same file twice. Instead, use Linux's non-standard open file descriptor +// locks which will block if the process already holds the file lock. 
+// +// constants from /usr/include/bits/fcntl-linux.h +const ( + F_OFD_GETLK = 37 + F_OFD_SETLK = 37 + F_OFD_SETLKW = 38 +) + +var ( + wrlck = syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(io.SeekStart), + Start: 0, + Len: 0, + } + + linuxTryLockFile = flockTryLockFile + linuxLockFile = flockLockFile +) + +func init() { + // use open file descriptor locks if the system supports it + getlk := syscall.Flock_t{Type: syscall.F_RDLCK} + if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil { + linuxTryLockFile = ofdTryLockFile + linuxLockFile = ofdLockFile + } +} + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxTryLockFile(path, flag, perm) +} + +func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err) + } + + flock := wrlck + if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxLockFile(path, flag, perm) +} + +func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err) + } + + flock := wrlck + err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) + if err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go new file mode 100644 index 0000000000..fee6a7c8f4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go @@ -0,0 +1,45 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
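The flock- and OFD-based implementations above all surface the same TryLockFile/LockFile API, with ErrLocked reported when another process already holds the file. A minimal sketch of the non-blocking path (illustrative only; the lock path and open flags are assumptions):

    package main

    import (
        "log"
        "os"

        "go.etcd.io/etcd/pkg/fileutil"
    )

    func main() {
        lf, err := fileutil.TryLockFile("/tmp/example.lock", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
        if err == fileutil.ErrLocked {
            log.Fatal("another process already holds the lock")
        }
        if err != nil {
            log.Fatal(err)
        }
        // LockedFile embeds *os.File; closing it releases the lock.
        defer lf.Close()
        // ... exclusive work ...
    }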
+ +package fileutil + +import ( + "os" + "syscall" + "time" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + f, err := os.Open(path, flag, perm) + if err != nil { + return nil, ErrLocked + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + for { + f, err := os.OpenFile(path, flag, perm) + if err == nil { + return &LockedFile{f}, nil + } + time.Sleep(10 * time.Millisecond) + } +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go new file mode 100644 index 0000000000..352ca5590d --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go @@ -0,0 +1,62 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + lock.Pid = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil { + f.Close() + if err == syscall.EAGAIN { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go new file mode 100644 index 0000000000..ed01164de6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows,!plan9,!solaris,!linux + +package fileutil + +import ( + "os" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockTryLockFile(path, flag, perm) +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockLockFile(path, flag, perm) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go new file mode 100644 index 0000000000..b1817230a3 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go @@ -0,0 +1,125 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "errors" + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + LOCKFILE_EXCLUSIVE_LOCK = 2 + LOCKFILE_FAIL_IMMEDIATELY = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("cannot open empty filename") + } + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + default: + panic(fmt.Errorf("flag %v is not supported", flag)) + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), + access, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, + syscall.OPEN_ALWAYS, + syscall.FILE_ATTRIBUTE_NORMAL, + 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK + flag |= flags + if fd == syscall.InvalidHandle { + return nil + } + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err.Error() 
== errLocked.Error() { + return ErrLocked + } else if err != errLockViolation { + return err + } + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved uint32 = 0 + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go new file mode 100644 index 0000000000..c747b7cf81 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go @@ -0,0 +1,54 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "io" + "os" +) + +// Preallocate tries to allocate the space for given +// file. This operation is only supported on linux by a +// few filesystems (btrfs, ext4, etc.). +// If the operation is unsupported, no error will be returned. +// Otherwise, the error encountered will be returned. +func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } + if extendFile { + return preallocExtend(f, sizeInBytes) + } + return preallocFixed(f, sizeInBytes) +} + +func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { + curOff, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + size, err := f.Seek(sizeInBytes, io.SeekEnd) + if err != nil { + return err + } + if _, err = f.Seek(curOff, io.SeekStart); err != nil { + return err + } + if sizeInBytes > size { + return nil + } + return f.Truncate(sizeInBytes) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go new file mode 100644 index 0000000000..5a6dccfa79 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go @@ -0,0 +1,65 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
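Preallocate above reserves disk space ahead of writes and degrades gracefully on filesystems without fallocate support. A small sketch (illustrative only; the temp file and the 64 MiB size are assumptions):

    package main

    import (
        "io/ioutil"
        "log"
        "os"

        "go.etcd.io/etcd/pkg/fileutil"
    )

    func main() {
        f, err := ioutil.TempFile("", "wal-")
        if err != nil {
            log.Fatal(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()
        // Reserve 64 MiB; extendFile=true also extends the file's visible size.
        if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
            log.Fatal(err)
        }
    }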
+ +// +build darwin + +package fileutil + +import ( + "os" + "syscall" + "unsafe" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + if err := preallocFixed(f, sizeInBytes); err != nil { + return err + } + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // allocate all requested space or no space at all + // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag + fstore := &syscall.Fstore_t{ + Flags: syscall.F_ALLOCATEALL, + Posmode: syscall.F_PEOFPOSMODE, + Length: sizeInBytes} + p := unsafe.Pointer(fstore) + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p)) + if errno == 0 || errno == syscall.ENOTSUP { + return nil + } + + // wrong argument to fallocate syscall + if errno == syscall.EINVAL { + // filesystem "st_blocks" are allocated in the units of + // "Allocation Block Size" (run "diskutil info /" command) + var stat syscall.Stat_t + syscall.Fstat(int(f.Fd()), &stat) + + // syscall.Statfs_t.Bsize is "optimal transfer block size" + // and contains matching 4096 value when latest OS X kernel + // supports 4,096 KB filesystem block size + var statfs syscall.Statfs_t + syscall.Fstatfs(int(f.Fd()), &statfs) + blockSize := int64(statfs.Bsize) + + if stat.Blocks*blockSize >= sizeInBytes { + // enough blocks are already allocated + return nil + } + } + return errno +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go new file mode 100644 index 0000000000..50bd84f02a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + // use mode = 0 to change size + err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // not supported; fallback + // fallocate EINTRs frequently in some environments; fallback + if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + return preallocExtendTrunc(f, sizeInBytes) + } + } + return err +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE + err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // treat not supported as nil error + if ok && errno == syscall.ENOTSUP { + return nil + } + } + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go new file mode 100644 index 0000000000..162fbc5f78 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +func preallocExtend(f *os.File, sizeInBytes int64) error { + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { return nil } diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go new file mode 100644 index 0000000000..d116f340b6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go @@ -0,0 +1,98 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fileutil + +import ( + "os" + "path/filepath" + "sort" + "strings" + "time" + + "go.uber.org/zap" +) + +func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil) +} + +func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC) + return doneC, errC +} + +// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. +// if donec is non-nil, the function closes it to notify its exit. +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { + errC := make(chan error, 1) + go func() { + if donec != nil { + defer close(donec) + } + for { + fnames, err := ReadDir(dirname) + if err != nil { + errC <- err + return + } + newfnames := make([]string, 0) + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + newfnames = append(newfnames, fname) + } + } + sort.Strings(newfnames) + fnames = newfnames + for len(newfnames) > int(max) { + f := filepath.Join(dirname, newfnames[0]) + l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + break + } + if err = os.Remove(f); err != nil { + errC <- err + return + } + if err = l.Close(); err != nil { + if lg != nil { + lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + } else { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + } + errC <- err + return + } + if lg != nil { + lg.Info("purged", zap.String("path", f)) + } else { + plog.Infof("purged file %s successfully", f) + } + newfnames = newfnames[1:] + } + if purgec != nil { + for i := 0; i < len(fnames)-len(newfnames); i++ { + purgec <- fnames[i] + } + } + select { + case <-time.After(interval): + case <-stop: + return + } + } + }() + return errC +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go new file mode 100644 index 0000000000..2eeaa89bc0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go @@ -0,0 +1,70 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "os" + "path/filepath" + "sort" +) + +// ReadDirOp represents an read-directory operation. +type ReadDirOp struct { + ext string +} + +// ReadDirOption configures archiver operations. +type ReadDirOption func(*ReadDirOp) + +// WithExt filters file names by their extensions. +// (e.g. 
WithExt(".wal") to list only WAL files) +func WithExt(ext string) ReadDirOption { + return func(op *ReadDirOp) { op.ext = ext } +} + +func (op *ReadDirOp) applyOpts(opts []ReadDirOption) { + for _, opt := range opts { + opt(op) + } +} + +// ReadDir returns the filenames in the given directory in sorted order. +func ReadDir(d string, opts ...ReadDirOption) ([]string, error) { + op := &ReadDirOp{} + op.applyOpts(opts) + + dir, err := os.Open(d) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + sort.Strings(names) + + if op.ext != "" { + tss := make([]string, 0) + for _, v := range names { + if filepath.Ext(v) == op.ext { + tss = append(tss, v) + } + } + names = tss + } + return names, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go new file mode 100644 index 0000000000..54dd41f4f3 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go @@ -0,0 +1,29 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform. +func Fdatasync(f *os.File) error { + return f.Sync() +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go new file mode 100644 index 0000000000..c2f39bf204 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin + +package fileutil + +import ( + "os" + "syscall" +) + +// Fsync on HFS/OSX flushes the data on to the physical drive but the drive +// may not write it to the persistent media for quite sometime and it may be +// written in out-of-order sequence. Using F_FULLFSYNC ensures that the +// physical drive's buffer will also get flushed to the media. +func Fsync(f *os.File) error { + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0)) + if errno == 0 { + return nil + } + return errno +} + +// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence +// on physical drive media. 
+func Fdatasync(f *os.File) error { + return Fsync(f) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go new file mode 100644 index 0000000000..1bbced915e --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go @@ -0,0 +1,34 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is similar to fsync(), but does not flush modified metadata +// unless that metadata is needed in order to allow a subsequent data retrieval +// to be correctly handled. +func Fdatasync(f *os.File) error { + return syscall.Fdatasync(int(f.Fd())) +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go new file mode 100644 index 0000000000..81b0a9d039 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go @@ -0,0 +1,46 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "log" + + "google.golang.org/grpc/grpclog" +) + +// assert that "discardLogger" satisfy "Logger" interface +var _ Logger = &discardLogger{} + +// NewDiscardLogger returns a new Logger that discards everything except "fatal". +func NewDiscardLogger() Logger { return &discardLogger{} } + +type discardLogger struct{} + +func (l *discardLogger) Info(args ...interface{}) {} +func (l *discardLogger) Infoln(args ...interface{}) {} +func (l *discardLogger) Infof(format string, args ...interface{}) {} +func (l *discardLogger) Warning(args ...interface{}) {} +func (l *discardLogger) Warningln(args ...interface{}) {} +func (l *discardLogger) Warningf(format string, args ...interface{}) {} +func (l *discardLogger) Error(args ...interface{}) {} +func (l *discardLogger) Errorln(args ...interface{}) {} +func (l *discardLogger) Errorf(format string, args ...interface{}) {} +func (l *discardLogger) Fatal(args ...interface{}) { log.Fatal(args...) } +func (l *discardLogger) Fatalln(args ...interface{}) { log.Fatalln(args...) } +func (l *discardLogger) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) 
} +func (l *discardLogger) V(lvl int) bool { + return false +} +func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l } diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/doc.go b/vendor/go.etcd.io/etcd/pkg/logutil/doc.go new file mode 100644 index 0000000000..e919f24993 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logutil includes utilities to facilitate logging. +package logutil diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go b/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go new file mode 100644 index 0000000000..d57e173944 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go @@ -0,0 +1,70 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "fmt" + + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var DefaultLogLevel = "info" + +// ConvertToZapLevel converts log level string to zapcore.Level. +func ConvertToZapLevel(lvl string) zapcore.Level { + switch lvl { + case "debug": + return zap.DebugLevel + case "info": + return zap.InfoLevel + case "warn": + return zap.WarnLevel + case "error": + return zap.ErrorLevel + case "dpanic": + return zap.DPanicLevel + case "panic": + return zap.PanicLevel + case "fatal": + return zap.FatalLevel + default: + panic(fmt.Sprintf("unknown level %q", lvl)) + } +} + +// ConvertToCapnslogLogLevel convert log level string to capnslog.LogLevel. +// TODO: deprecate this in 3.5 +func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel { + switch lvl { + case "debug": + return capnslog.DEBUG + case "info": + return capnslog.INFO + case "warn": + return capnslog.WARNING + case "error": + return capnslog.ERROR + case "dpanic": + return capnslog.CRITICAL + case "panic": + return capnslog.CRITICAL + case "fatal": + return capnslog.CRITICAL + default: + panic(fmt.Sprintf("unknown level %q", lvl)) + } +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/logger.go new file mode 100644 index 0000000000..e7da80eff1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/logger.go @@ -0,0 +1,64 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import "google.golang.org/grpc/grpclog" + +// Logger defines logging interface. +// TODO: deprecate in v3.5. +type Logger interface { + grpclog.LoggerV2 + + // Lvl returns logger if logger's verbosity level >= "lvl". + // Otherwise, logger that discards everything. + Lvl(lvl int) grpclog.LoggerV2 +} + +// assert that "defaultLogger" satisfy "Logger" interface +var _ Logger = &defaultLogger{} + +// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface. +// +// For example: +// +// var defaultLogger Logger +// g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4) +// defaultLogger = NewLogger(g) +// +func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} } + +type defaultLogger struct { + g grpclog.LoggerV2 +} + +func (l *defaultLogger) Info(args ...interface{}) { l.g.Info(args...) } +func (l *defaultLogger) Infoln(args ...interface{}) { l.g.Info(args...) } +func (l *defaultLogger) Infof(format string, args ...interface{}) { l.g.Infof(format, args...) } +func (l *defaultLogger) Warning(args ...interface{}) { l.g.Warning(args...) } +func (l *defaultLogger) Warningln(args ...interface{}) { l.g.Warning(args...) } +func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) } +func (l *defaultLogger) Error(args ...interface{}) { l.g.Error(args...) } +func (l *defaultLogger) Errorln(args ...interface{}) { l.g.Error(args...) } +func (l *defaultLogger) Errorf(format string, args ...interface{}) { l.g.Errorf(format, args...) } +func (l *defaultLogger) Fatal(args ...interface{}) { l.g.Fatal(args...) } +func (l *defaultLogger) Fatalln(args ...interface{}) { l.g.Fatal(args...) } +func (l *defaultLogger) Fatalf(format string, args ...interface{}) { l.g.Fatalf(format, args...) } +func (l *defaultLogger) V(lvl int) bool { return l.g.V(lvl) } +func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 { + if l.g.V(lvl) { + return l + } + return &discardLogger{} +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go new file mode 100644 index 0000000000..866b6f7a89 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go @@ -0,0 +1,194 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
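The Logger interface above wraps grpclog.LoggerV2 and adds verbosity-scoped views via Lvl. A sketch following the example quoted in the NewLogger doc comment (illustrative only, not part of the vendored files):

    package main

    import (
        "os"

        "go.etcd.io/etcd/pkg/logutil"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
        lg := logutil.NewLogger(g)
        lg.Lvl(2).Info("printed only when the wrapped logger's verbosity is >= 2")

        // The discarding variant stays silent for everything except fatal messages.
        logutil.NewDiscardLogger().Info("dropped")
    }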
+ +package logutil + +import ( + "fmt" + "sync" + "time" + + "github.com/coreos/pkg/capnslog" +) + +var ( + defaultMergePeriod = time.Second + defaultTimeOutputScale = 10 * time.Millisecond + + outputInterval = time.Second +) + +// line represents a log line that can be printed out +// through capnslog.PackageLogger. +type line struct { + level capnslog.LogLevel + str string +} + +func (l line) append(s string) line { + return line{ + level: l.level, + str: l.str + " " + s, + } +} + +// status represents the merge status of a line. +type status struct { + period time.Duration + + start time.Time // start time of latest merge period + count int // number of merged lines from starting +} + +func (s *status) isInMergePeriod(now time.Time) bool { + return s.period == 0 || s.start.Add(s.period).After(now) +} + +func (s *status) isEmpty() bool { return s.count == 0 } + +func (s *status) summary(now time.Time) string { + ts := s.start.Round(defaultTimeOutputScale) + took := now.Round(defaultTimeOutputScale).Sub(ts) + return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took) +} + +func (s *status) reset(now time.Time) { + s.start = now + s.count = 0 +} + +// MergeLogger supports merge logging, which merges repeated log lines +// and prints summary log lines instead. +// +// For merge logging, MergeLogger prints out the line when the line appears +// at the first time. MergeLogger holds the same log line printed within +// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod. +// It stops merging when the line doesn't appear within the +// defaultMergePeriod. +type MergeLogger struct { + *capnslog.PackageLogger + + mu sync.Mutex // protect statusm + statusm map[line]*status +} + +func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger { + l := &MergeLogger{ + PackageLogger: logger, + statusm: make(map[line]*status), + } + go l.outputLoop() + return l +} + +func (l *MergeLogger) MergeInfo(entries ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeInfof(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeNotice(entries ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeWarning(entries ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeError(entries ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) merge(ln line) { + l.mu.Lock() + + // increase count if the logger is merging the line + if status, ok := l.statusm[ln]; ok { + status.count++ + l.mu.Unlock() + return + } + + // initialize status of the line + l.statusm[ln] = &status{ + period: defaultMergePeriod, + start: time.Now(), + } + // release the lock before IO operation + l.mu.Unlock() + // print out the line at its 
first time + l.PackageLogger.Logf(ln.level, ln.str) +} + +func (l *MergeLogger) outputLoop() { + for now := range time.Tick(outputInterval) { + var outputs []line + + l.mu.Lock() + for ln, status := range l.statusm { + if status.isInMergePeriod(now) { + continue + } + if status.isEmpty() { + delete(l.statusm, ln) + continue + } + outputs = append(outputs, ln.append(status.summary(now))) + status.reset(now) + } + l.mu.Unlock() + + for _, o := range outputs { + l.PackageLogger.Logf(o.level, o.str) + } + } +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go new file mode 100644 index 0000000000..729cbdb57e --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go @@ -0,0 +1,60 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc/grpclog" +) + +// assert that "packageLogger" satisfy "Logger" interface +var _ Logger = &packageLogger{} + +// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface. +// +// For example: +// +// var defaultLogger Logger +// defaultLogger = NewPackageLogger("go.etcd.io/etcd", "snapshot") +// +func NewPackageLogger(repo, pkg string) Logger { + return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)} +} + +type packageLogger struct { + p *capnslog.PackageLogger +} + +func (l *packageLogger) Info(args ...interface{}) { l.p.Info(args...) } +func (l *packageLogger) Infoln(args ...interface{}) { l.p.Info(args...) } +func (l *packageLogger) Infof(format string, args ...interface{}) { l.p.Infof(format, args...) } +func (l *packageLogger) Warning(args ...interface{}) { l.p.Warning(args...) } +func (l *packageLogger) Warningln(args ...interface{}) { l.p.Warning(args...) } +func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) } +func (l *packageLogger) Error(args ...interface{}) { l.p.Error(args...) } +func (l *packageLogger) Errorln(args ...interface{}) { l.p.Error(args...) } +func (l *packageLogger) Errorf(format string, args ...interface{}) { l.p.Errorf(format, args...) } +func (l *packageLogger) Fatal(args ...interface{}) { l.p.Fatal(args...) } +func (l *packageLogger) Fatalln(args ...interface{}) { l.p.Fatal(args...) } +func (l *packageLogger) Fatalf(format string, args ...interface{}) { l.p.Fatalf(format, args...) 
} +func (l *packageLogger) V(lvl int) bool { + return l.p.LevelAt(capnslog.LogLevel(lvl)) +} +func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 { + if l.p.LevelAt(capnslog.LogLevel(lvl)) { + return l + } + return &discardLogger{} +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap.go new file mode 100644 index 0000000000..8fc6e03b77 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap.go @@ -0,0 +1,91 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "sort" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// DefaultZapLoggerConfig defines default zap logger configuration. +var DefaultZapLoggerConfig = zap.Config{ + Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)), + + Development: false, + Sampling: &zap.SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + + Encoding: "json", + + // copied from "zap.NewProductionEncoderConfig" with some updates + EncoderConfig: zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }, + + // Use "/dev/null" to discard all + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, +} + +// MergeOutputPaths merges logging output paths, resolving conflicts. +func MergeOutputPaths(cfg zap.Config) zap.Config { + outputs := make(map[string]struct{}) + for _, v := range cfg.OutputPaths { + outputs[v] = struct{}{} + } + outputSlice := make([]string, 0) + if _, ok := outputs["/dev/null"]; ok { + // "/dev/null" to discard all + outputSlice = []string{"/dev/null"} + } else { + for k := range outputs { + outputSlice = append(outputSlice, k) + } + } + cfg.OutputPaths = outputSlice + sort.Strings(cfg.OutputPaths) + + errOutputs := make(map[string]struct{}) + for _, v := range cfg.ErrorOutputPaths { + errOutputs[v] = struct{}{} + } + errOutputSlice := make([]string, 0) + if _, ok := errOutputs["/dev/null"]; ok { + // "/dev/null" to discard all + errOutputSlice = []string{"/dev/null"} + } else { + for k := range errOutputs { + errOutputSlice = append(errOutputSlice, k) + } + } + cfg.ErrorOutputPaths = errOutputSlice + sort.Strings(cfg.ErrorOutputPaths) + + return cfg +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go new file mode 100644 index 0000000000..3f48d813da --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go @@ -0,0 +1,111 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc/grpclog" +) + +// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2". +// It discards all INFO level logging in gRPC, if debug level +// is not enabled in "*zap.Logger". +func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) { + lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil" + if err != nil { + return nil, err + } + return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil +} + +// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core" +// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC, +// if debug level is not enabled in "*zap.Logger". +func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 { + // "AddCallerSkip" to annotate caller outside of "logutil" + lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer)) + return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()} +} + +type zapGRPCLogger struct { + lg *zap.Logger + sugar *zap.SugaredLogger +} + +func (zl *zapGRPCLogger) Info(args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Info(args...) +} + +func (zl *zapGRPCLogger) Infoln(args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Info(args...) +} + +func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Infof(format, args...) +} + +func (zl *zapGRPCLogger) Warning(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapGRPCLogger) Warningln(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) { + zl.sugar.Warnf(format, args...) +} + +func (zl *zapGRPCLogger) Error(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapGRPCLogger) Errorln(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) { + zl.sugar.Errorf(format, args...) +} + +func (zl *zapGRPCLogger) Fatal(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapGRPCLogger) Fatalln(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) { + zl.sugar.Fatalf(format, args...) +} + +func (zl *zapGRPCLogger) V(l int) bool { + // infoLog == 0 + if l <= 0 { // debug level, then we ignore info level in gRPC + return !zl.lg.Core().Enabled(zapcore.DebugLevel) + } + return true +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go new file mode 100644 index 0000000000..fcd3903810 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go @@ -0,0 +1,92 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package logutil + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "go.etcd.io/etcd/pkg/systemd" + + "github.com/coreos/go-systemd/journal" + "go.uber.org/zap/zapcore" +) + +// NewJournalWriter wraps "io.Writer" to redirect log output +// to the local systemd journal. If journald send fails, it fails +// back to writing to the original writer. +// The decode overhead is only <30µs per write. +// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go +func NewJournalWriter(wr io.Writer) (io.Writer, error) { + return &journalWriter{Writer: wr}, systemd.DialJournal() +} + +type journalWriter struct { + io.Writer +} + +// WARN: assume that etcd uses default field names in zap encoder config +// make sure to keep this up-to-date! +type logLine struct { + Level string `json:"level"` + Caller string `json:"caller"` +} + +func (w *journalWriter) Write(p []byte) (int, error) { + line := &logLine{} + if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil { + return 0, err + } + + var pri journal.Priority + switch line.Level { + case zapcore.DebugLevel.String(): + pri = journal.PriDebug + case zapcore.InfoLevel.String(): + pri = journal.PriInfo + + case zapcore.WarnLevel.String(): + pri = journal.PriWarning + case zapcore.ErrorLevel.String(): + pri = journal.PriErr + + case zapcore.DPanicLevel.String(): + pri = journal.PriCrit + case zapcore.PanicLevel.String(): + pri = journal.PriCrit + case zapcore.FatalLevel.String(): + pri = journal.PriCrit + + default: + panic(fmt.Errorf("unknown log level: %q", line.Level)) + } + + err := journal.Send(string(p), pri, map[string]string{ + "PACKAGE": filepath.Dir(line.Caller), + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + }) + if err != nil { + // "journal" also falls back to stderr + // "fmt.Fprintln(os.Stderr, s)" + return w.Writer.Write(p) + } + return 0, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go new file mode 100644 index 0000000000..f016b3054e --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go @@ -0,0 +1,102 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "errors" + + "go.etcd.io/etcd/raft" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// NewRaftLogger builds "raft.Logger" from "*zap.Config". 
+func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) { + if lcfg == nil { + return nil, errors.New("nil zap.Config") + } + lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil" + if err != nil { + return nil, err + } + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil +} + +// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger". +func NewRaftLoggerZap(lg *zap.Logger) raft.Logger { + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} +} + +// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core" +// and "zapcore.WriteSyncer". +func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger { + // "AddCallerSkip" to annotate caller outside of "logutil" + lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer)) + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} +} + +type zapRaftLogger struct { + lg *zap.Logger + sugar *zap.SugaredLogger +} + +func (zl *zapRaftLogger) Debug(args ...interface{}) { + zl.sugar.Debug(args...) +} + +func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) { + zl.sugar.Debugf(format, args...) +} + +func (zl *zapRaftLogger) Error(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) { + zl.sugar.Errorf(format, args...) +} + +func (zl *zapRaftLogger) Info(args ...interface{}) { + zl.sugar.Info(args...) +} + +func (zl *zapRaftLogger) Infof(format string, args ...interface{}) { + zl.sugar.Infof(format, args...) +} + +func (zl *zapRaftLogger) Warning(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) { + zl.sugar.Warnf(format, args...) +} + +func (zl *zapRaftLogger) Fatal(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) { + zl.sugar.Fatalf(format, args...) +} + +func (zl *zapRaftLogger) Panic(args ...interface{}) { + zl.sugar.Panic(args...) +} + +func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) { + zl.sugar.Panicf(format, args...) +} diff --git a/vendor/go.etcd.io/etcd/pkg/systemd/doc.go b/vendor/go.etcd.io/etcd/pkg/systemd/doc.go new file mode 100644 index 0000000000..30e77ce044 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/systemd/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package systemd provides utility functions for systemd. +package systemd diff --git a/vendor/go.etcd.io/etcd/pkg/systemd/journal.go b/vendor/go.etcd.io/etcd/pkg/systemd/journal.go new file mode 100644 index 0000000000..b861c69425 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/systemd/journal.go @@ -0,0 +1,29 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package systemd + +import "net" + +// DialJournal returns no error if the process can dial journal socket. +// Returns an error if dial failed, whichi indicates journald is not available +// (e.g. run embedded etcd as docker daemon). +// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go. +func DialJournal() error { + conn, err := net.Dial("unixgram", "/run/systemd/journal/socket") + if conn != nil { + defer conn.Close() + } + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go new file mode 100644 index 0000000000..b5916bb54d --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go @@ -0,0 +1,51 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import "crypto/tls" + +// cipher suites implemented by Go +// https://github.com/golang/go/blob/dev.boringcrypto.go1.10/src/crypto/tls/cipher_suites.go +var cipherSuites = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + 
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +// GetCipherSuite returns the corresponding cipher suite, +// and boolean value if it is supported. +func GetCipherSuite(s string) (uint16, bool) { + v, ok := cipherSuites[s] + return v, ok +} diff --git a/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go new file mode 100644 index 0000000000..3b6aa670ba --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tlsutil provides utility functions for handling TLS. +package tlsutil diff --git a/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go new file mode 100644 index 0000000000..3a5aef089a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go @@ -0,0 +1,73 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" +) + +// NewCertPool creates x509 certPool with provided CA files. +func NewCertPool(CAFiles []string) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, CAFile := range CAFiles { + pemByte, err := ioutil.ReadFile(CAFile) + if err != nil { + return nil, err + } + + for { + var block *pem.Block + block, pemByte = pem.Decode(pemByte) + if block == nil { + break + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + + certPool.AddCert(cert) + } + } + + return certPool, nil +} + +// NewCert generates TLS cert by using the given cert,key and parse function. 
+func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { + cert, err := ioutil.ReadFile(certfile) + if err != nil { + return nil, err + } + + key, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, err + } + + if parseFunc == nil { + parseFunc = tls.X509KeyPair + } + + tlsCert, err := parseFunc(cert, key) + if err != nil { + return nil, err + } + return &tlsCert, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/doc.go b/vendor/go.etcd.io/etcd/pkg/transport/doc.go new file mode 100644 index 0000000000..37658ce591 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport implements various HTTP transport utilities based on Go +// net package. +package transport diff --git a/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go new file mode 100644 index 0000000000..4ff8e7f001 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go @@ -0,0 +1,94 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "time" +) + +type keepAliveConn interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(d time.Duration) error +} + +// NewKeepAliveListener returns a listener that listens on the given address. +// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. +// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. 
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html +func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { + if scheme == "https" { + if tlscfg == nil { + return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") + } + return newTLSKeepaliveListener(l, tlscfg), nil + } + + return &keepaliveListener{ + Listener: l, + }, nil +} + +type keepaliveListener struct{ net.Listener } + +func (kln *keepaliveListener) Accept() (net.Conn, error) { + c, err := kln.Listener.Accept() + if err != nil { + return nil, err + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + return c, nil +} + +// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. +type tlsKeepaliveListener struct { + net.Listener + config *tls.Config +} + +// Accept waits for and returns the next incoming TLS connection. +// The returned connection c is a *tls.Conn. +func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + c = tls.Server(c, l.config) + return c, nil +} + +// NewListener creates a Listener which accepts connections from an inner +// Listener and wraps each connection with Server. +// The configuration config must be non-nil and must have +// at least one certificate. +func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { + l := &tlsKeepaliveListener{} + l.Listener = inner + l.config = config + return l +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go b/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go new file mode 100644 index 0000000000..930c542066 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go @@ -0,0 +1,80 @@ +// Copyright 2013 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides network utility functions, complementing the more +// common ones in the net package. +package transport + +import ( + "errors" + "net" + "sync" + "time" +) + +var ( + ErrNotTCP = errors.New("only tcp connections have keepalive") +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. 
+func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} + +func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlive(doKeepAlive) +} + +func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlivePeriod(d) +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/listener.go b/vendor/go.etcd.io/etcd/pkg/transport/listener.go new file mode 100644 index 0000000000..7260e4d079 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/listener.go @@ -0,0 +1,449 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "os" + "path/filepath" + "strings" + "time" + + "go.etcd.io/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/tlsutil" + + "go.uber.org/zap" +) + +// NewListener creates a new listner. +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { + if l, err = newListener(addr, scheme); err != nil { + return nil, err + } + return wrapTLS(scheme, tlsinfo, l) +} + +func newListener(addr string, scheme string) (net.Listener, error) { + if scheme == "unix" || scheme == "unixs" { + // unix sockets via unix://laddr + return NewUnixListener(addr) + } + return net.Listen("tcp", addr) +} + +func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil + } + if tlsinfo != nil && tlsinfo.SkipClientSANVerify { + return NewTLSListener(l, tlsinfo) + } + return newTLSListener(l, tlsinfo, checkSAN) +} + +type TLSInfo struct { + CertFile string + KeyFile string + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool + SkipClientSANVerify bool + + // ServerName ensures the cert matches the given host in case of discovery / virtual hosting + ServerName string + + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. 
+ HandshakeFailure func(*tls.Conn, error) + + // CipherSuites is a list of supported cipher suites. + // If empty, Go auto-populates it by default. + // Note that cipher suites are prioritized in the given order. + CipherSuites []uint16 + + selfCert bool + + // parseFunc exists to simplify testing. Typically, parseFunc + // should be left nil. In that case, tls.X509KeyPair will be used. + parseFunc func([]byte, []byte) (tls.Certificate, error) + + // AllowedCN is a CN which must be provided by a client. + AllowedCN string + + // AllowedHostname is an IP address or hostname that must match the TLS + // certificate provided by a client. + AllowedHostname string + + // Logger logs TLS errors. + // If nil, all logs are discarded. + Logger *zap.Logger + + // EmptyCN indicates that the cert must have empty CN. + // If true, ClientConfig() will return an error for a cert with non empty CN. + EmptyCN bool +} + +func (info TLSInfo) String() string { + return fmt.Sprintf("cert = %s, key = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) +} + +func (info TLSInfo) Empty() bool { + return info.CertFile == "" && info.KeyFile == "" +} + +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { + info.Logger = lg + err = fileutil.TouchDirAll(dirpath) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot create cert directory", + zap.Error(err), + ) + } + return + } + + certPath := filepath.Join(dirpath, "cert.pem") + keyPath := filepath.Join(dirpath, "key.pem") + _, errcert := os.Stat(certPath) + _, errkey := os.Stat(keyPath) + if errcert == nil && errkey == nil { + info.CertFile = certPath + info.KeyFile = keyPath + info.selfCert = true + return + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate random number", + zap.Error(err), + ) + } + return + } + + tmpl := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"etcd"}}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * (24 * time.Hour)), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), + BasicConstraintsValid: true, + } + + for _, host := range hosts { + h, _, _ := net.SplitHostPort(host) + if ip := net.ParseIP(h); ip != nil { + tmpl.IPAddresses = append(tmpl.IPAddresses, ip) + } else { + tmpl.DNSNames = append(tmpl.DNSNames, h) + } + } + + priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate ECDSA key", + zap.Error(err), + ) + } + return + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate x509 certificate", + zap.Error(err), + ) + } + return + } + + certOut, err := os.Create(certPath) + if err != nil { + info.Logger.Warn( + "cannot cert file", + zap.String("path", certPath), + zap.Error(err), + ) + return + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + if info.Logger != nil { + info.Logger.Info("created cert file", zap.String("path", certPath)) + } + + b, err := 
x509.MarshalECPrivateKey(priv) + if err != nil { + return + } + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot key file", + zap.String("path", keyPath), + zap.Error(err), + ) + } + return + } + pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) + keyOut.Close() + if info.Logger != nil { + info.Logger.Info("created key file", zap.String("path", keyPath)) + } + return SelfCert(lg, dirpath, hosts) +} + +// baseConfig is called on initial TLS handshake start. +// +// Previously, +// 1. Server has non-empty (*tls.Config).Certificates on client hello +// 2. Server calls (*tls.Config).GetCertificate iff: +// - Server's (*tls.Config).Certificates is not empty, or +// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName +// +// When (*tls.Config).Certificates is always populated on initial handshake, +// client is expected to provide a valid matching SNI to pass the TLS +// verification, thus trigger server (*tls.Config).GetCertificate to reload +// TLS assets. However, a cert whose SAN field does not include domain names +// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus +// it was never able to trigger TLS reload on initial handshake; first +// ceritifcate object was being used, never being updated. +// +// Now, (*tls.Config).Certificates is created empty on initial TLS client +// handshake, in order to trigger (*tls.Config).GetCertificate and populate +// rest of the certificates on every new TLS connection, even when client +// SNI is empty (e.g. cert only includes IPs). +func (info TLSInfo) baseConfig() (*tls.Config, error) { + if info.KeyFile == "" || info.CertFile == "" { + return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) + } + if info.Logger == nil { + info.Logger = zap.NewNop() + } + + _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if err != nil { + return nil, err + } + + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: info.ServerName, + } + + if len(info.CipherSuites) > 0 { + cfg.CipherSuites = info.CipherSuites + } + + // Client certificates may be verified by either an exact match on the CN, + // or a more general check of the CN and SANs. + var verifyCertificate func(*x509.Certificate) bool + if info.AllowedCN != "" { + if info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + verifyCertificate = func(cert *x509.Certificate) bool { + return info.AllowedCN == cert.Subject.CommonName + } + } + if info.AllowedHostname != "" { + verifyCertificate = func(cert *x509.Certificate) bool { + return cert.VerifyHostname(info.AllowedHostname) == nil + } + } + if verifyCertificate != nil { + cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + for _, chains := range verifiedChains { + if len(chains) != 0 { + if verifyCertificate(chains[0]) { + return nil + } + } + } + return errors.New("client certificate authentication failed") + } + } + + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find peer cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create peer certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find client cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create client certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err + } + return cfg, nil +} + +// cafiles returns a list of CA file paths. +func (info TLSInfo) cafiles() []string { + cs := make([]string, 0) + if info.TrustedCAFile != "" { + cs = append(cs, info.TrustedCAFile) + } + return cs +} + +// ServerConfig generates a tls.Config object for use by an HTTP server. +func (info TLSInfo) ServerConfig() (*tls.Config, error) { + cfg, err := info.baseConfig() + if err != nil { + return nil, err + } + + cfg.ClientAuth = tls.NoClientCert + if info.TrustedCAFile != "" || info.ClientCertAuth { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + cs := info.cafiles() + if len(cs) > 0 { + cp, err := tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + cfg.ClientCAs = cp + } + + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server + cfg.NextProtos = []string{"h2"} + + return cfg, nil +} + +// ClientConfig generates a tls.Config object for use by an HTTP client. +func (info TLSInfo) ClientConfig() (*tls.Config, error) { + var cfg *tls.Config + var err error + + if !info.Empty() { + cfg, err = info.baseConfig() + if err != nil { + return nil, err + } + } else { + cfg = &tls.Config{ServerName: info.ServerName} + } + cfg.InsecureSkipVerify = info.InsecureSkipVerify + + cs := info.cafiles() + if len(cs) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + } + + if info.selfCert { + cfg.InsecureSkipVerify = true + } + + if info.EmptyCN { + hasNonEmptyCN := false + cn := "" + tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { + var block *pem.Block + block, _ = pem.Decode(certPEMBlock) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return tls.Certificate{}, err + } + if len(cert.Subject.CommonName) != 0 { + hasNonEmptyCN = true + cn = cert.Subject.CommonName + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) + }) + if hasNonEmptyCN { + return nil, fmt.Errorf("cert has non empty Common Name (%s)", cn) + } + } + + return cfg, nil +} + +// IsClosedConnError returns true if the error is from closing listener, cmux. 
+// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go b/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go new file mode 100644 index 0000000000..6f1600945c --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,272 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials or CRL revoked +// certificates. +type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) + check tlsCheckFunc +} + +type tlsCheckFunc func(context.Context, *tls.Conn) error + +// NewTLSListener handshakes TLS connections and performs optional CRL checking. +func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + check := func(context.Context, *tls.Conn) error { return nil } + return newTLSListener(l, tlsinfo, check) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + + if len(tlsinfo.CRLFile) > 0 { + prevCheck := check + check = func(ctx context.Context, tlsConn *tls.Conn) error { + if err := prevCheck(ctx, tlsConn); err != nil { + return err + } + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + return checkCRL(tlsinfo.CRLFile, certs) + } + return nil + } + } + + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + check: check, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + addr := tlsConn.RemoteAddr().String() + return checkCertSAN(ctx, certs[0], addr) + } + return nil +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. 
+func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + if err := l.check(ctx, tlsConn); err != nil { + l.handshakeFailure(tlsConn, err) + return + } + + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCRL(crlPath string, cert []*x509.Certificate) error { + // TODO: cache + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + certList, err := x509.ParseCRL(crlBytes) + if err != nil { + return err + } + revokedSerials := make(map[string]struct{}) + for _, rc := range certList.TBSCertList.RevokedCertificates { + revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} + } + for _, c := range cert { + serial := string(c.SerialNumber.Bytes()) + if _, ok := revokedSerials[serial]; ok { + return fmt.Errorf("transport: certificate serial %x revoked", serial) + } + } + return nil +} + +func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + h, _, herr := net.SplitHostPort(remoteAddr) + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' 
{ + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go new file mode 100644 index 0000000000..7e8c02030f --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go @@ -0,0 +1,44 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +type timeoutConn struct { + net.Conn + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (c timeoutConn) Write(b []byte) (n int, err error) { + if c.wtimeoutd > 0 { + if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Write(b) +} + +func (c timeoutConn) Read(b []byte) (n int, err error) { + if c.rdtimeoutd > 0 { + if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Read(b) +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go new file mode 100644 index 0000000000..6ae39ecfc9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go @@ -0,0 +1,36 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
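For orientation, here is a minimal sketch of how the TLS listener plumbing above is typically driven by a caller; the address and the cert/key/CA paths are illustrative assumptions, not values taken from this patch:

    package main

    import (
        "log"

        "go.etcd.io/etcd/pkg/transport"
    )

    func main() {
        // Hypothetical cert/key/CA paths; field names match the TLSInfo struct above.
        tlsInfo := &transport.TLSInfo{
            CertFile:       "server.crt",
            KeyFile:        "server.key",
            TrustedCAFile:  "ca.crt",
            ClientCertAuth: true,
        }

        // For the "https" scheme, wrapTLS installs the SAN/CRL-checking tlsListener,
        // which handshakes each connection in its own goroutine before Accept returns it.
        ln, err := transport.NewListener("127.0.0.1:2379", "https", tlsInfo)
        if err != nil {
            log.Fatal(err)
        }
        defer ln.Close()
    }

With TrustedCAFile or ClientCertAuth set, ServerConfig switches to RequireAndVerifyClientCert, and the listener's check hook rejects peers that fail the SAN or CRL checks shown above.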
+ +package transport + +import ( + "net" + "time" +) + +type rwTimeoutDialer struct { + wtimeoutd time.Duration + rdtimeoutd time.Duration + net.Dialer +} + +func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) { + conn, err := d.Dialer.Dial(network, address) + tconn := &timeoutConn{ + rdtimeoutd: d.rdtimeoutd, + wtimeoutd: d.wtimeoutd, + Conn: conn, + } + return tconn, err +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go new file mode 100644 index 0000000000..273e99fe03 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go @@ -0,0 +1,57 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +// NewTimeoutListener returns a listener that listens on the given address. +// If read/write on the accepted connection blocks longer than its time limit, +// it will return timeout error. +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { + ln, err := newListener(addr, scheme) + if err != nil { + return nil, err + } + ln = &rwTimeoutListener{ + Listener: ln, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + } + if ln, err = wrapTLS(scheme, tlsinfo, ln); err != nil { + return nil, err + } + return ln, nil +} + +type rwTimeoutListener struct { + net.Listener + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { + c, err := rwln.Listener.Accept() + if err != nil { + return nil, err + } + return timeoutConn{ + Conn: c, + wtimeoutd: rwln.wtimeoutd, + rdtimeoutd: rwln.rdtimeoutd, + }, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go new file mode 100644 index 0000000000..ea16b4c0f8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go @@ -0,0 +1,51 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "net/http" + "time" +) + +// NewTimeoutTransport returns a transport created using the given TLS info. +// If read/write on the created connection blocks longer than its time limit, +// it will return timeout error. +// If read/write timeout is set, transport will not be able to reuse connection. 
+func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { + tr, err := NewTransport(info, dialtimeoutd) + if err != nil { + return nil, err + } + + if rdtimeoutd != 0 || wtimeoutd != 0 { + // the timed out connection will timeout soon after it is idle. + // it should not be put back to http transport as an idle connection for future usage. + tr.MaxIdleConnsPerHost = -1 + } else { + // allow more idle connections between peers to avoid unnecessary port allocation. + tr.MaxIdleConnsPerHost = 1024 + } + + tr.Dial = (&rwTimeoutDialer{ + Dialer: net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + }).Dial + return tr, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/tls.go b/vendor/go.etcd.io/etcd/pkg/transport/tls.go new file mode 100644 index 0000000000..62fe0d3851 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/tls.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "strings" + "time" +) + +// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those +// endpoints that could be validated as secure. +func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { + t, err := NewTransport(tlsInfo, 5*time.Second) + if err != nil { + return nil, err + } + var errs []string + var endpoints []string + for _, ep := range eps { + if !strings.HasPrefix(ep, "https://") { + errs = append(errs, fmt.Sprintf("%q is insecure", ep)) + continue + } + conn, cerr := t.Dial("tcp", ep[len("https://"):]) + if cerr != nil { + errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) + continue + } + conn.Close() + endpoints = append(endpoints, ep) + } + if len(errs) != 0 { + err = fmt.Errorf("%s", strings.Join(errs, ",")) + } + return endpoints, err +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/transport.go b/vendor/go.etcd.io/etcd/pkg/transport/transport.go new file mode 100644 index 0000000000..4a7fe69d2e --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/transport.go @@ -0,0 +1,71 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
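A short, hedged usage sketch for ValidateSecureEndpoints and NewTimeoutTransport above; the endpoint list, CA path, and timeout values are assumptions chosen for illustration:

    package main

    import (
        "log"
        "net/http"
        "time"

        "go.etcd.io/etcd/pkg/transport"
    )

    func main() {
        info := transport.TLSInfo{TrustedCAFile: "ca.crt"} // hypothetical CA bundle

        // Keep only endpoints that use https:// and can actually be dialed with this TLS info.
        eps, err := transport.ValidateSecureEndpoints(info, []string{
            "https://10.0.0.1:2379",
            "http://10.0.0.2:2379", // reported as insecure in err
        })
        if err != nil {
            log.Println("rejected endpoints:", err)
        }
        log.Println("validated endpoints:", eps)

        // 5s dial timeout, 1s per-operation read/write deadlines; with non-zero
        // read/write timeouts the transport disables idle connection reuse.
        tr, err := transport.NewTimeoutTransport(info, 5*time.Second, time.Second, time.Second)
        if err != nil {
            log.Fatal(err)
        }
        _ = &http.Client{Transport: tr} // would be used against the validated endpoints
    }

The trade-off follows directly from the code above: setting MaxIdleConnsPerHost to -1 gives bounded per-operation latency at the cost of reusing connections.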
+ +package transport + +import ( + "net" + "net/http" + "strings" + "time" +) + +type unixTransport struct{ *http.Transport } + +func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { + cfg, err := info.ClientConfig() + if err != nil { + return nil, err + } + + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: dialtimeoutd, + // value taken from http.DefaultTransport + KeepAlive: 30 * time.Second, + }).Dial, + // value taken from http.DefaultTransport + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + + dialer := (&net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }) + dial := func(net, addr string) (net.Conn, error) { + return dialer.Dial("unix", addr) + } + + tu := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + ut := &unixTransport{tu} + + t.RegisterProtocol("unix", ut) + t.RegisterProtocol("unixs", ut) + + return t, nil +} + +func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { + url := *req.URL + req.URL = &url + req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) + return urt.Transport.RoundTrip(req) +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go new file mode 100644 index 0000000000..123e2036f0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "os" +) + +type unixListener struct{ net.Listener } + +func NewUnixListener(addr string) (net.Listener, error) { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return nil, err + } + l, err := net.Listen("unix", addr) + if err != nil { + return nil, err + } + return &unixListener{l}, nil +} + +func (ul *unixListener) Close() error { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { + return err + } + return ul.Listener.Close() +} diff --git a/vendor/go.etcd.io/etcd/pkg/types/doc.go b/vendor/go.etcd.io/etcd/pkg/types/doc.go new file mode 100644 index 0000000000..de8ef0bd71 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
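To illustrate the "unix"/"unixs" protocol registration above, a sketch that serves HTTP over a unix socket and reaches it through NewTransport; the socket name and handler are hypothetical, and note that the URL's host:port is passed verbatim to the unix dial function as the socket file name:

    package main

    import (
        "log"
        "net/http"
        "time"

        "go.etcd.io/etcd/pkg/transport"
    )

    func main() {
        // Hypothetical socket name; the listener address and the URL host must match,
        // since the dial func treats the URL's host:port as the unix socket path.
        const sock = "127.0.0.1:21001"

        // NewUnixListener removes a stale socket file of the same name before listening.
        ln, err := transport.NewUnixListener(sock)
        if err != nil {
            log.Fatal(err)
        }
        go http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok"))
        }))

        // The returned transport registers the "unix" and "unixs" schemes and
        // rewrites them to plain http after dialing the socket.
        tr, err := transport.NewTransport(transport.TLSInfo{}, time.Second)
        if err != nil {
            log.Fatal(err)
        }
        resp, err := (&http.Client{Transport: tr}).Get("unix://" + sock + "/")
        if err != nil {
            log.Fatal(err)
        }
        resp.Body.Close()
    }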
+ +// Package types declares various data types and implements type-checking +// functions. +package types diff --git a/vendor/go.etcd.io/etcd/pkg/types/id.go b/vendor/go.etcd.io/etcd/pkg/types/id.go new file mode 100644 index 0000000000..ae00388dde --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/id.go @@ -0,0 +1,39 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "strconv" + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/go.etcd.io/etcd/pkg/types/set.go b/vendor/go.etcd.io/etcd/pkg/types/set.go new file mode 100644 index 0000000000..e7a3cdc9ab --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/set.go @@ -0,0 +1,195 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) 
+ return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return exists +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return values +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Equals(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + return true + } + } + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Sub(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + usResult := NewUnsafeSet() + return &tsafeSet{usResult, sync.RWMutex{}} + } + } + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/go.etcd.io/etcd/pkg/types/slice.go b/vendor/go.etcd.io/etcd/pkg/types/slice.go new file mode 100644 index 0000000000..0dd9ca798a --- /dev/null +++ 
b/vendor/go.etcd.io/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/go.etcd.io/etcd/pkg/types/urls.go b/vendor/go.etcd.io/etcd/pkg/types/urls.go new file mode 100644 index 0000000000..9e5d03ff64 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go new file mode 100644 index 0000000000..47690cc381 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/vendor/go.etcd.io/etcd/raft/OWNERS b/vendor/go.etcd.io/etcd/raft/OWNERS new file mode 100644 index 0000000000..ab781066e2 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/OWNERS @@ -0,0 +1,19 @@ +approvers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +reviewers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +- tschottdorf diff --git a/vendor/go.etcd.io/etcd/raft/README.md b/vendor/go.etcd.io/etcd/raft/README.md new file mode 100644 index 0000000000..83cf04035c --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/README.md @@ -0,0 +1,197 @@ +# Raft library + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. 
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. + +This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more. + +Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance. + +To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state. + +In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. + +A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/master/contrib/raftexample + +# Features + +This raft implementation is a full feature implementation of Raft protocol. 
Features includes: + +- Leader election +- Log replication +- Log compaction +- Membership changes +- Leadership transfer extension +- Efficient linearizable read-only queries served by both the leader and followers + - leader checks with quorum and bypasses Raft log before processing read-only queries + - followers asks leader to get a safe read index before processing read-only queries +- More efficient lease-based linearizable read-only queries served by both the leader and followers + - leader bypasses Raft log and processing read-only queries locally + - followers asks leader to get a safe read index before processing read-only queries + - this approach relies on the clock of the all the machines in raft group + +This raft implementation also includes a few optional enhancements: + +- Optimistic pipelining to reduce log replication latency +- Flow control for log replication +- Batching Raft messages to reduce synchronized network I/O calls +- Batching log entries to reduce disk synchronized I/O +- Writing to leader's disk in parallel +- Internal proposal redirection from followers to leader +- Automatic stepping down when the leader loses quorum +- Protection against unbounded log growth when quorum is lost + +## Notable Users + +- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database +- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database +- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store +- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft +- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. +- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks + +## Usage + +The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a three-node cluster +```go + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + // Set peer list to the other nodes in the cluster. + // Note that they need to be started separately as well. + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) +``` + +Start a single node cluster, like so: +```go + // Create storage and config as shown above. + // Set peer list to itself, so this node can become the leader of this single-node cluster. + peers := []raft.Peer{{ID: 0x01}} + n := raft.StartNode(c, peers) +``` + +To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so: +```go + // Create storage and config as shown above. + n := raft.StartNode(c, nil) +``` + +To restart a node from previous state: +```go + storage := raft.NewMemoryStorage() + + // Recover the in-memory storage from persistent snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // Restart raft without peer information. 
+ // Peer information is already included in the storage. + n := raft.RestartNode(c) +``` + +After creating a Node, the user has a few responsibilities: + +First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2. + +1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied. + +Third, after receiving a message from another node, pass it to Node.Step: + +```go + func recvRaftRPC(ctx context.Context, m raftpb.Message) { + n.Step(ctx, m) + } +``` + +Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick". 
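+
+As a possible illustration (not part of this library's API), one way to drive the tick is a small goroutine built on `time.Ticker`; the 100ms interval below is only an example value and must be chosen so that `ElectionTick` ticks comfortably exceed expected network round trips:
+
+```go
+	// Hypothetical sketch: feed ticks to the node from a time.Ticker.
+	// The handling loop shown below folds the same idea into its select
+	// via a channel (s.Ticker).
+	go func() {
+		ticker := time.NewTicker(100 * time.Millisecond) // illustrative interval
+		defer ticker.Stop()
+		for range ticker.C {
+			n.Tick()
+		}
+	}()
+```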
+ +The total state machine handling loop will look something like this: + +```go + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + saveToStorage(rd.HardState, rd.Entries, rd.Snapshot) + send(rd.Messages) + if !raft.IsEmptySnap(rd.Snapshot) { + processSnapshot(rd.Snapshot) + } + for _, entry := range rd.CommittedEntries { + process(entry) + if entry.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + s.Node.Advance() + case <-s.done: + return + } + } +``` + +To propose changes to the state machine from the node to take application data, serialize it into a byte slice and call: + +```go + n.Propose(ctx, data) +``` + +If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. + +To add or remove node in a cluster, build ConfChange struct 'cc' and call: + +```go + n.ProposeConfChange(ctx, cc) +``` + +After config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to node through: + +```go + var cc raftpb.ConfChange + cc.Unmarshal(data) + n.ApplyConfChange(cc) +``` + +Note: An ID represents a unique node in a cluster for all time. A +given ID MUST be used only once even if the old node has been removed. +This means that for example IP addresses make poor node IDs since they +may be reused. Node IDs must be non-zero. + +## Implementation notes + +This implementation is up to date with the final Raft thesis (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. + +To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. + +This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster. diff --git a/vendor/go.etcd.io/etcd/raft/bootstrap.go b/vendor/go.etcd.io/etcd/raft/bootstrap.go new file mode 100644 index 0000000000..bd82b2041a --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/bootstrap.go @@ -0,0 +1,80 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Bootstrap initializes the RawNode for first use by appending configuration +// changes for the supplied peers. This method returns an error if the Storage +// is nonempty. +// +// It is recommended that instead of calling this method, applications bootstrap +// their state manually by setting up a Storage that has a first index > 1 and +// which stores the desired ConfState as its InitialState. +func (rn *RawNode) Bootstrap(peers []Peer) error { + if len(peers) == 0 { + return errors.New("must provide at least one peer to Bootstrap") + } + lastIndex, err := rn.raft.raftLog.storage.LastIndex() + if err != nil { + return err + } + + if lastIndex != 0 { + return errors.New("can't bootstrap a nonempty Storage") + } + + // We've faked out initial entries above, but nothing has been + // persisted. Start with an empty HardState (thus the first Ready will + // emit a HardState update for the app to persist). + rn.prevHardSt = emptyState + + // TODO(tbg): remove StartNode and give the application the right tools to + // bootstrap the initial membership in a cleaner way. + rn.raft.becomeFollower(1, None) + ents := make([]pb.Entry, len(peers)) + for i, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + data, err := cc.Marshal() + if err != nil { + return err + } + + ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} + } + rn.raft.raftLog.append(ents...) + + // Now apply them, mainly so that the application can call Campaign + // immediately after StartNode in tests. Note that these nodes will + // be added to raft twice: here and when the application's Ready + // loop calls ApplyConfChange. The calls to addNode must come after + // all calls to raftLog.append so progress.next is set after these + // bootstrapping entries (it is an error if we try to append these + // entries since they have already been committed). + // We do not set raftLog.applied so the application will be able + // to observe all conf changes via Ready.CommittedEntries. + // + // TODO(bdarnell): These entries are still unstable; do we need to preserve + // the invariant that committed < unstable? + rn.raft.raftLog.committed = uint64(len(ents)) + for _, peer := range peers { + rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2()) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/raft/confchange/confchange.go b/vendor/go.etcd.io/etcd/raft/confchange/confchange.go new file mode 100644 index 0000000000..a0dc486df4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/confchange/confchange.go @@ -0,0 +1,425 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + "errors" + "fmt" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// Changer facilitates configuration changes. It exposes methods to handle +// simple and joint consensus while performing the proper validation that allows +// refusing invalid configuration changes before they affect the active +// configuration. +type Changer struct { + Tracker tracker.ProgressTracker + LastIndex uint64 +} + +// EnterJoint verifies that the outgoing (=right) majority config of the joint +// config is empty and initializes it with a copy of the incoming (=left) +// majority config. That is, it transitions from +// +// (1 2 3)&&() +// to +// (1 2 3)&&(1 2 3). +// +// The supplied changes are then applied to the incoming majority config, +// resulting in a joint configuration that in terms of the Raft thesis[1] +// (Section 4.3) corresponds to `C_{new,old}`. +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("config is already joint") + return c.err(err) + } + if len(incoming(cfg.Voters)) == 0 { + // We allow adding nodes to an empty config for convenience (testing and + // bootstrap), but you can't enter a joint state. + err := errors.New("can't make a zero-voter config joint") + return c.err(err) + } + // Clear the outgoing config. + *outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{} + // Copy incoming to outgoing. + for id := range incoming(cfg.Voters) { + outgoing(cfg.Voters)[id] = struct{}{} + } + + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + cfg.AutoLeave = autoLeave + return checkAndReturn(cfg, prs) +} + +// LeaveJoint transitions out of a joint configuration. It is an error to call +// this method if the configuration is not joint, i.e. if the outgoing majority +// config Voters[1] is empty. +// +// The outgoing majority config of the joint configuration will be removed, +// that is, the incoming config is promoted as the sole decision maker. In the +// notation of the Raft thesis[1] (Section 4.3), this method transitions from +// `C_{new,old}` into `C_new`. +// +// At the same time, any staged learners (LearnersNext) the addition of which +// was held back by an overlapping voter in the former outgoing config will be +// inserted into Learners. 
+// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if !joint(cfg) { + err := errors.New("can't leave a non-joint config") + return c.err(err) + } + if len(outgoing(cfg.Voters)) == 0 { + err := fmt.Errorf("configuration is not joint: %v", cfg) + return c.err(err) + } + for id := range cfg.LearnersNext { + nilAwareAdd(&cfg.Learners, id) + prs[id].IsLearner = true + } + cfg.LearnersNext = nil + + for id := range outgoing(cfg.Voters) { + _, isVoter := incoming(cfg.Voters)[id] + _, isLearner := cfg.Learners[id] + + if !isVoter && !isLearner { + delete(prs, id) + } + } + *outgoingPtr(&cfg.Voters) = nil + cfg.AutoLeave = false + + return checkAndReturn(cfg, prs) +} + +// Simple carries out a series of configuration changes that (in aggregate) +// mutates the incoming majority config Voters[0] by at most one. This method +// will return an error if that is not the case, if the resulting quorum is +// zero, or if the configuration is in a joint state (i.e. if there is an +// outgoing configuration). +func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("can't apply simple config change in joint config") + return c.err(err) + } + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 { + return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config") + } + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, nil + } + + return checkAndReturn(cfg, prs) +} + +// apply a change to the configuration. By convention, changes to voters are +// always made to the incoming majority config Voters[0]. Voters[1] is either +// empty or preserves the outgoing majority configuration while in a joint state. +func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error { + for _, cc := range ccs { + if cc.NodeID == 0 { + // etcd replaces the NodeID with zero if it decides (downstream of + // raft) to not apply a change, so we have to have explicit code + // here to ignore these. + continue + } + switch cc.Type { + case pb.ConfChangeAddNode: + c.makeVoter(cfg, prs, cc.NodeID) + case pb.ConfChangeAddLearnerNode: + c.makeLearner(cfg, prs, cc.NodeID) + case pb.ConfChangeRemoveNode: + c.remove(cfg, prs, cc.NodeID) + case pb.ConfChangeUpdateNode: + default: + return fmt.Errorf("unexpected conf type %d", cc.Type) + } + } + if len(incoming(cfg.Voters)) == 0 { + return errors.New("removed all voters") + } + return nil +} + +// makeVoter adds or promotes the given ID to be a voter in the incoming +// majority config. +func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, false /* isLearner */) + return + } + + pr.IsLearner = false + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + incoming(cfg.Voters)[id] = struct{}{} + return +} + +// makeLearner makes the given ID a learner or stages it to be a learner once +// an active joint configuration is exited. 
+// +// The former happens when the peer is not a part of the outgoing config, in +// which case we either add a new learner or demote a voter in the incoming +// config. +// +// The latter case occurs when the configuration is joint and the peer is a +// voter in the outgoing config. In that case, we do not want to add the peer +// as a learner because then we'd have to track a peer as a voter and learner +// simultaneously. Instead, we add the learner to LearnersNext, so that it will +// be added to Learners the moment the outgoing config is removed by +// LeaveJoint(). +func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, true /* isLearner */) + return + } + if pr.IsLearner { + return + } + // Remove any existing voter in the incoming config... + c.remove(cfg, prs, id) + // ... but save the Progress. + prs[id] = pr + // Use LearnersNext if we can't add the learner to Learners directly, i.e. + // if the peer is still tracked as a voter in the outgoing config. It will + // be turned into a learner in LeaveJoint(). + // + // Otherwise, add a regular learner right away. + if _, onRight := outgoing(cfg.Voters)[id]; onRight { + nilAwareAdd(&cfg.LearnersNext, id) + } else { + pr.IsLearner = true + nilAwareAdd(&cfg.Learners, id) + } +} + +// remove this peer as a voter or learner from the incoming config. +func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + if _, ok := prs[id]; !ok { + return + } + + delete(incoming(cfg.Voters), id) + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + + // If the peer is still a voter in the outgoing config, keep the Progress. + if _, onRight := outgoing(cfg.Voters)[id]; !onRight { + delete(prs, id) + } +} + +// initProgress initializes a new progress for the given node or learner. +func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) { + if !isLearner { + incoming(cfg.Voters)[id] = struct{}{} + } else { + nilAwareAdd(&cfg.Learners, id) + } + prs[id] = &tracker.Progress{ + // Initializing the Progress with the last index means that the follower + // can be probed (with the last index). + // + // TODO(tbg): seems awfully optimistic. Using the first index would be + // better. The general expectation here is that the follower has no log + // at all (and will thus likely need a snapshot), though the app may + // have applied a snapshot out of band before adding the replica (thus + // making the first index the better choice). + Next: c.LastIndex, + Match: 0, + Inflights: tracker.NewInflights(c.Tracker.MaxInflight), + IsLearner: isLearner, + // When a node is first added, we should mark it as recently active. + // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has had a chance to communicate with us. + RecentActive: true, + } +} + +// checkInvariants makes sure that the config and progress are compatible with +// each other. This is used to check both what the Changer is initialized with, +// as well as what it returns. +func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { + // NB: intentionally allow the empty config. In production we'll never see a + // non-empty config (we prevent it from being created) but we will need to + // be able to *create* an initial config, for example during bootstrap (or + // during tests). 
Instead of having to hand-code this, we allow + // transitioning from an empty config into any other legal and non-empty + // config. + for _, ids := range []map[uint64]struct{}{ + cfg.Voters.IDs(), + cfg.Learners, + cfg.LearnersNext, + } { + for id := range ids { + if _, ok := prs[id]; !ok { + return fmt.Errorf("no progress for %d", id) + } + } + } + + // Any staged learner was staged because it could not be directly added due + // to a conflicting voter in the outgoing config. + for id := range cfg.LearnersNext { + if _, ok := outgoing(cfg.Voters)[id]; !ok { + return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id) + } + if prs[id].IsLearner { + return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id) + } + } + // Conversely Learners and Voters doesn't intersect at all. + for id := range cfg.Learners { + if _, ok := outgoing(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[1]", id) + } + if _, ok := incoming(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[0]", id) + } + if !prs[id].IsLearner { + return fmt.Errorf("%d is in Learners, but is not marked as learner", id) + } + } + + if !joint(cfg) { + // We enforce that empty maps are nil instead of zero. + if outgoing(cfg.Voters) != nil { + return fmt.Errorf("Voters[1] must be nil when not joint") + } + if cfg.LearnersNext != nil { + return fmt.Errorf("LearnersNext must be nil when not joint") + } + if cfg.AutoLeave { + return fmt.Errorf("AutoLeave must be false when not joint") + } + } + + return nil +} + +// checkAndCopy copies the tracker's config and progress map (deeply enough for +// the purposes of the Changer) and returns those copies. It returns an error +// if checkInvariants does. +func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) { + cfg := c.Tracker.Config.Clone() + prs := tracker.ProgressMap{} + + for id, pr := range c.Tracker.Progress { + // A shallow copy is enough because we only mutate the Learner field. + ppr := *pr + prs[id] = &ppr + } + return checkAndReturn(cfg, prs) +} + +// checkAndReturn calls checkInvariants on the input and returns either the +// resulting error or the input. +func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) { + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, err + } + return cfg, prs, nil +} + +// err returns zero values and an error. +func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) { + return tracker.Config{}, nil, err +} + +// nilAwareAdd populates a map entry, creating the map if necessary. +func nilAwareAdd(m *map[uint64]struct{}, id uint64) { + if *m == nil { + *m = map[uint64]struct{}{} + } + (*m)[id] = struct{}{} +} + +// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after. +func nilAwareDelete(m *map[uint64]struct{}, id uint64) { + if *m == nil { + return + } + delete(*m, id) + if len(*m) == 0 { + *m = nil + } +} + +// symdiff returns the count of the symmetric difference between the sets of +// uint64s, i.e. len( (l - r) \union (r - l)). 
+func symdiff(l, r map[uint64]struct{}) int { + var n int + pairs := [][2]quorum.MajorityConfig{ + {l, r}, // count elems in l but not in r + {r, l}, // count elems in r but not in l + } + for _, p := range pairs { + for id := range p[0] { + if _, ok := p[1][id]; !ok { + n++ + } + } + } + return n +} + +func joint(cfg tracker.Config) bool { + return len(outgoing(cfg.Voters)) > 0 +} + +func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] } +func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] } +func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] } + +// Describe prints the type and NodeID of the configuration changes as a +// space-delimited string. +func Describe(ccs ...pb.ConfChangeSingle) string { + var buf strings.Builder + for _, cc := range ccs { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/confchange/restore.go b/vendor/go.etcd.io/etcd/raft/confchange/restore.go new file mode 100644 index 0000000000..724068da00 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/confchange/restore.go @@ -0,0 +1,155 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// toConfChangeSingle translates a conf state into 1) a slice of operations creating +// first the config that will become the outgoing one, and then the incoming one, and +// b) another slice that, when applied to the config resulted from 1), represents the +// ConfState. +func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) { + // Example to follow along this code: + // voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4) + // + // This means that before entering the joint config, the configuration + // had voters (1 2 4) and perhaps some learners that are already gone. + // The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6) + // are no longer voters; however 4 is poised to become a learner upon leaving + // the joint state. + // We can't tell whether 5 was a learner before entering the joint config, + // but it doesn't matter (we'll pretend that it wasn't). + // + // The code below will construct + // outgoing = add 1; add 2; add 4; add 6 + // incoming = remove 1; remove 2; remove 4; remove 6 + // add 1; add 2; add 3; + // add-learner 5; + // add-learner 4; + // + // So, when starting with an empty config, after applying 'outgoing' we have + // + // quorum=(1 2 4 6) + // + // From which we enter a joint state via 'incoming' + // + // quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4) + // + // as desired. + + for _, id := range cs.VotersOutgoing { + // If there are outgoing voters, first add them one by one so that the + // (non-joint) config has them all. 
+ out = append(out, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + + } + + // We're done constructing the outgoing slice, now on to the incoming one + // (which will apply on top of the config created by the outgoing slice). + + // First, we'll remove all of the outgoing voters. + for _, id := range cs.VotersOutgoing { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeRemoveNode, + NodeID: id, + }) + } + // Then we'll add the incoming voters and learners. + for _, id := range cs.Voters { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + } + for _, id := range cs.Learners { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + // Same for LearnersNext; these are nodes we want to be learners but which + // are currently voters in the outgoing config. + for _, id := range cs.LearnersNext { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + return out, in +} + +func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) { + for _, op := range ops { + cfg, prs, err := op(chg) + if err != nil { + return tracker.Config{}, nil, err + } + chg.Tracker.Config = cfg + chg.Tracker.Progress = prs + } + return chg.Tracker.Config, chg.Tracker.Progress, nil +} + +// Restore takes a Changer (which must represent an empty configuration), and +// runs a sequence of changes enacting the configuration described in the +// ConfState. +// +// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure +// the Changer only needs a ProgressMap (not a whole Tracker) at which point +// this can just take LastIndex and MaxInflight directly instead and cook up +// the results from that alone. +func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) { + outgoing, incoming := toConfChangeSingle(cs) + + var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error) + + if len(outgoing) == 0 { + // No outgoing config, so just apply the incoming changes one by one. + for _, cc := range incoming { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + } else { + // The ConfState describes a joint configuration. + // + // First, apply all of the changes of the outgoing config one by one, so + // that it temporarily becomes the incoming active config. For example, + // if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&(). + for _, cc := range outgoing { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + // Now enter the joint state, which rotates the above additions into the + // outgoing config, and adds the incoming config in. Continuing the + // example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations + // would be removing 2,3,4 and then adding in 1,2,3 while transitioning + // into a joint state. + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.EnterJoint(cs.AutoLeave, incoming...) + }) + } + + return chain(chg, ops...) 
+} diff --git a/vendor/go.etcd.io/etcd/raft/design.md b/vendor/go.etcd.io/etcd/raft/design.md new file mode 100644 index 0000000000..7bc0531dce --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/design.md @@ -0,0 +1,57 @@ +## Progress + +Progress represents a follower’s progress in the view of the leader. Leader maintains progresses of all followers, and sends `replication message` to the follower based on its progress. + +`replication message` is a `msgApp` with log entries. + +A progress has two attribute: `match` and `next`. `match` is the index of the highest known matched entry. If leader knows nothing about follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. Leader puts entries from `next` to its latest one in next `replication message`. + +A progress is in one of the three state: `probe`, `replicate`, `snapshot`. + +``` + +--------------------------------------------------------+ + | send snapshot | + | | + +---------+----------+ +----------v---------+ + +---> probe | | snapshot | + | | max inflight = 1 <----------------------------------+ max inflight = 0 | + | +---------+----------+ +--------------------+ + | | 1. snapshot success + | | (next=snapshot.index + 1) + | | 2. snapshot failure + | | (no change) + | | 3. receives msgAppResp(rej=false&&index>lastsnap.index) + | | (match=m.index,next=match+1) +receives msgAppResp(rej=true) +(next=match+1)| | + | | + | | + | | receives msgAppResp(rej=false&&index>match) + | | (match=m.index,next=match+1) + | | + | | + | | + | +---------v----------+ + | | replicate | + +---+ max inflight = n | + +--------------------+ +``` + +When the progress of a follower is in `probe` state, leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message` slowly and probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`. + +When the progress of a follower is in `replicate` state, leader sends `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replicating log entries to the follower. + +When the progress of a follower is in `snapshot` state, leader stops sending any `replication message`. + +A newly elected leader sets the progresses of all the followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress. + +A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, leader starts to stream log entries to the follower fast. The progress will fall back to `probe` when the follower replies a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up with sending some duplicate entries when aggressively reset `next` too low. see open question) + +A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied. 
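+
+The probe/replicate transitions described above can be summarized in a small toy sketch (this is illustration only, not the `tracker` package that implements progress in this library; the names `progressState`, `progress`, and `onAppResp` are invented here):
+
+```go
+package main
+
+import "fmt"
+
+type progressState int
+
+const (
+	stateProbe progressState = iota
+	stateReplicate
+	stateSnapshot
+)
+
+type progress struct {
+	state       progressState
+	match, next uint64
+}
+
+// onAppResp applies a msgAppResp to a follower's progress, following the
+// rules above: a rejection falls back to probe and resets next to match+1;
+// a non-rejection advances match/next and moves probe to replicate.
+func (pr *progress) onAppResp(rejected bool, index uint64) {
+	if rejected {
+		pr.state = stateProbe
+		pr.next = pr.match + 1
+		return
+	}
+	if index > pr.match {
+		pr.match = index
+		pr.next = index + 1
+	}
+	if pr.state == stateProbe {
+		pr.state = stateReplicate
+	}
+}
+
+func main() {
+	// A newly elected leader starts every follower in probe with match = 0.
+	pr := &progress{state: stateProbe, match: 0, next: 42}
+	pr.onAppResp(false, 41) // follower acknowledges entry 41
+	fmt.Println(pr.state == stateReplicate, pr.match, pr.next) // true 41 42
+}
+```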
+ +### Flow Control + +1. limit the max size of message sent per message. Max should be configurable. +Lower the cost at probing state as we limit the size per message; lower the penalty when aggressively decreased to a too low `next` + +2. limit the # of in flight messages < N when in `replicate` state. N should be configurable. Most implementation will have a sending buffer on top of its actual network transport layer (not blocking raft node). We want to make sure raft does not overflow that buffer, which can cause message dropping and triggering a bunch of unnecessary resending repeatedly. diff --git a/vendor/go.etcd.io/etcd/raft/doc.go b/vendor/go.etcd.io/etcd/raft/doc.go new file mode 100644 index 0000000000..68fe6f0a6e --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/doc.go @@ -0,0 +1,300 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package raft sends and receives messages in the Protocol Buffer format +defined in the raftpb package. + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. +For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. + +A simple example application, _raftexample_, is also available to help illustrate +how to use this package in practice: +https://github.com/etcd-io/etcd/tree/master/contrib/raftexample + +Usage + +The primary object in raft is a Node. You either start a Node from scratch +using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a node from scratch: + + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) + +To restart a node from previous state: + + storage := raft.NewMemoryStorage() + + // recover the in-memory storage from persistent + // snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // restart raft without peer information. + // peer information is already included in the storage. + n := raft.RestartNode(c) + +Now that you are holding onto a Node you have a few responsibilities: + +First, you must read from the Node.Ready() channel and process the updates +it contains. These steps may be performed in parallel, except as noted in step +2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are +not empty. Note that when writing an Entry with Index i, any +previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. 
It is important that +no messages be sent until the latest HardState has been persisted to disk, +and all Entries written by any previous Ready batch (Messages may be sent while +entries from the same batch are being persisted). To reduce the I/O latency, an +optimization can be applied to make leader write to disk in parallel with its +followers (as explained at section 10.2.1 in Raft thesis). If any Message has type +MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be +large). + +Note: Marshalling messages is not thread-safe; it is important that you +make sure that no new entries are persisted while marshalling. +The easiest way to achieve this is to serialize the messages directly inside +your main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. +If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() +to apply it to the node. The configuration change may be cancelled at this point +by setting the NodeID field to zero before calling ApplyConfChange +(but ApplyConfChange must be called one way or the other, and the decision to cancel +must be based solely on the state machine and not external information such as +the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. +This may be done at any time after step 1, although all updates must be processed +in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an +implementation of the Storage interface. The provided MemoryStorage +type can be used for this (if you repopulate its state upon a +restart), or you can supply your own disk-backed implementation. + +Third, when you receive a message from another node, pass it to Node.Step: + + func recvRaftRPC(ctx context.Context, m raftpb.Message) { + n.Step(ctx, m) + } + +Finally, you need to call Node.Tick() at regular intervals (probably +via a time.Ticker). Raft has two important timeouts: heartbeat and the +election timeout. However, internally to the raft package time is +represented by an abstract "tick". + +The total state machine handling loop will look something like this: + + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + saveToStorage(rd.State, rd.Entries, rd.Snapshot) + send(rd.Messages) + if !raft.IsEmptySnap(rd.Snapshot) { + processSnapshot(rd.Snapshot) + } + for _, entry := range rd.CommittedEntries { + process(entry) + if entry.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + s.Node.Advance() + case <-s.done: + return + } + } + +To propose changes to the state machine from your node take your application +data, serialize it into a byte slice and call: + + n.Propose(ctx, data) + +If the proposal is committed, data will appear in committed entries with type +raftpb.EntryNormal. There is no guarantee that a proposed command will be +committed; you may have to re-propose after a timeout. + +To add or remove a node in a cluster, build ConfChange struct 'cc' and call: + + n.ProposeConfChange(ctx, cc) + +After config change is committed, some committed entry with type +raftpb.EntryConfChange will be returned. You must apply it to node through: + + var cc raftpb.ConfChange + cc.Unmarshal(data) + n.ApplyConfChange(cc) + +Note: An ID represents a unique node in a cluster for all time. A +given ID MUST be used only once even if the old node has been removed. 
+This means that for example IP addresses make poor node IDs since they +may be reused. Node IDs must be non-zero. + +Implementation notes + +This implementation is up to date with the final Raft thesis +(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our +implementation of the membership change protocol differs somewhat from +that described in chapter 4. The key invariant that membership changes +happen one node at a time is preserved, but in our implementation the +membership change takes effect when its entry is applied, not when it +is added to the log (so the entry is committed under the old +membership instead of the new). This is equivalent in terms of safety, +since the old and new configurations are guaranteed to overlap. + +To ensure that we do not attempt to commit two membership changes at +once by matching log positions (which would be unsafe since they +should have different quorum requirements), we simply disallow any +proposed membership change while any uncommitted change appears in +the leader's log. + +This approach introduces a problem when you try to remove a member +from a two-member cluster: If one of the members dies before the +other one receives the commit of the confchange entry, then the member +cannot be removed any more since the cluster cannot make progress. +For this reason it is highly recommended to use three or more nodes in +every cluster. + +MessageType + +Package raft sends and receives message in Protocol Buffer format (defined +in raftpb package). Each state (follower, candidate, leader) implements its +own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when +advancing with the given raftpb.Message. Each step is determined by its +raftpb.MessageType. Note that every step is checked by one common method +'Step' that safety-checks the terms of node and incoming message to prevent +stale log entries: + + 'MsgHup' is used for election. If a node is a follower or candidate, the + 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or + candidate has not received any heartbeat before the election timeout, it + passes 'MsgHup' to its Step method and becomes (or remains) a candidate to + start a new election. + + 'MsgBeat' is an internal type that signals the leader to send a heartbeat of + the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in + the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to + send periodic 'MsgHeartbeat' messages to its followers. + + 'MsgProp' proposes to append data to its log entries. This is a special + type to redirect proposals to leader. Therefore, send method overwrites + raftpb.Message's term with its HardState's term to avoid attaching its + local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step' + method, the leader first calls the 'appendEntry' method to append entries + to its log, and then calls 'bcastAppend' method to send those entries to + its peers. When passed to candidate, 'MsgProp' is dropped. When passed to + follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send + method. It is stored with sender's ID and later forwarded to leader by + rafthttp package. + + 'MsgApp' contains log entries to replicate. A leader calls bcastAppend, + which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp' + type. When 'MsgApp' is passed to candidate's Step method, candidate reverts + back to follower, because it indicates that there is a valid leader sending + 'MsgApp' messages. 
Candidate and follower respond to this message in + 'MsgAppResp' type. + + 'MsgAppResp' is response to log replication request('MsgApp'). When + 'MsgApp' is passed to candidate or follower's Step method, it responds by + calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft + mailbox. + + 'MsgVote' requests votes for election. When a node is a follower or + candidate and 'MsgHup' is passed to its Step method, then the node calls + 'campaign' method to campaign itself to become a leader. Once 'campaign' + method is called, the node becomes candidate and sends 'MsgVote' to peers + in cluster to request votes. When passed to leader or candidate's Step + method and the message's Term is lower than leader's or candidate's, + 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true). + If leader or candidate receives 'MsgVote' with higher term, it will revert + back to follower. When 'MsgVote' is passed to follower, it votes for the + sender only when sender's last term is greater than MsgVote's term or + sender's last term is equal to MsgVote's term but sender's last committed + index is greater than or equal to follower's. + + 'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is + passed to candidate, the candidate calculates how many votes it has won. If + it's more than majority (quorum), it becomes leader and calls 'bcastAppend'. + If candidate receives majority of votes of denials, it reverts back to + follower. + + 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election + protocol. When Config.PreVote is true, a pre-election is carried out first + (using the same rules as a regular election), and no node increases its term + number unless the pre-election indicates that the campaigning node would win. + This minimizes disruption when a partitioned node rejoins the cluster. + + 'MsgSnap' requests to install a snapshot message. When a node has just + become a leader or the leader receives 'MsgProp' message, it calls + 'bcastAppend' method, which then calls 'sendAppend' method to each + follower. In 'sendAppend', if a leader fails to get term or entries, + the leader requests snapshot by sending 'MsgSnap' type message. + + 'MsgSnapStatus' tells the result of snapshot install message. When a + follower rejected 'MsgSnap', it indicates the snapshot request with + 'MsgSnap' had failed from network issues which causes the network layer + to fail to send out snapshots to its followers. Then leader considers + follower's progress as probe. When 'MsgSnap' were not rejected, it + indicates that the snapshot succeeded and the leader sets follower's + progress to probe and resumes its log replication. + + 'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed + to candidate and message's term is higher than candidate's, the candidate + reverts back to follower and updates its committed index from the one in + this heartbeat. And it sends the message to its mailbox. When + 'MsgHeartbeat' is passed to follower's Step method and message's term is + higher than follower's, the follower updates its leaderID with the ID + from the message. + + 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp' + is passed to leader's Step method, the leader knows which follower + responded. And only when the leader's last committed index is greater than + follower's Match index, the leader runs 'sendAppend` method. + + 'MsgUnreachable' tells that request(message) wasn't delivered. 
When + 'MsgUnreachable' is passed to leader's Step method, the leader discovers + that the follower that sent this 'MsgUnreachable' is not reachable, often + indicating 'MsgApp' is lost. When follower's progress state is replicate, + the leader sets it back to probe. + +*/ +package raft diff --git a/vendor/go.etcd.io/etcd/raft/log.go b/vendor/go.etcd.io/etcd/raft/log.go new file mode 100644 index 0000000000..77eedfccba --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/log.go @@ -0,0 +1,372 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "log" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +type raftLog struct { + // storage contains all stable entries since the last snapshot. + storage Storage + + // unstable contains all unstable entries and snapshot. + // they will be saved into storage. + unstable unstable + + // committed is the highest log position that is known to be in + // stable storage on a quorum of nodes. + committed uint64 + // applied is the highest log position that the application has + // been instructed to apply to its state machine. + // Invariant: applied <= committed + applied uint64 + + logger Logger + + // maxNextEntsSize is the maximum number aggregate byte size of the messages + // returned from calls to nextEnts. + maxNextEntsSize uint64 +} + +// newLog returns log using the given storage and default options. It +// recovers the log to the state that it just commits and applies the +// latest snapshot. +func newLog(storage Storage, logger Logger) *raftLog { + return newLogWithSize(storage, logger, noLimit) +} + +// newLogWithSize returns a log using the given storage and max +// message size. +func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog { + if storage == nil { + log.Panic("storage must not be nil") + } + log := &raftLog{ + storage: storage, + logger: logger, + maxNextEntsSize: maxNextEntsSize, + } + firstIndex, err := storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + lastIndex, err := storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + log.unstable.offset = lastIndex + 1 + log.unstable.logger = logger + // Initialize our committed and applied pointers to the time of the last compaction. + log.committed = firstIndex - 1 + log.applied = firstIndex - 1 + + return log +} + +func (l *raftLog) String() string { + return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) +} + +// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, +// it returns (last index of new entries, true). 
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { + if l.matchTerm(index, logTerm) { + lastnewi = index + uint64(len(ents)) + ci := l.findConflict(ents) + switch { + case ci == 0: + case ci <= l.committed: + l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) + default: + offset := index + 1 + l.append(ents[ci-offset:]...) + } + l.commitTo(min(committed, lastnewi)) + return lastnewi, true + } + return 0, false +} + +func (l *raftLog) append(ents ...pb.Entry) uint64 { + if len(ents) == 0 { + return l.lastIndex() + } + if after := ents[0].Index - 1; after < l.committed { + l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) + } + l.unstable.truncateAndAppend(ents) + return l.lastIndex() +} + +// findConflict finds the index of the conflict. +// It returns the first pair of conflicting entries between the existing +// entries and the given entries, if there are any. +// If there is no conflicting entries, and the existing entries contains +// all the given entries, zero will be returned. +// If there is no conflicting entries, but the given entries contains new +// entries, the index of the first new entry will be returned. +// An entry is considered to be conflicting if it has the same index but +// a different term. +// The first entry MUST have an index equal to the argument 'from'. +// The index of the given entries MUST be continuously increasing. +func (l *raftLog) findConflict(ents []pb.Entry) uint64 { + for _, ne := range ents { + if !l.matchTerm(ne.Index, ne.Term) { + if ne.Index <= l.lastIndex() { + l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", + ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term) + } + return ne.Index + } + } + return 0 +} + +func (l *raftLog) unstableEntries() []pb.Entry { + if len(l.unstable.entries) == 0 { + return nil + } + return l.unstable.entries +} + +// nextEnts returns all the available entries for execution. +// If applied is smaller than the index of snapshot, it returns all committed +// entries after the index of snapshot. +func (l *raftLog) nextEnts() (ents []pb.Entry) { + off := max(l.applied+1, l.firstIndex()) + if l.committed+1 > off { + ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize) + if err != nil { + l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) + } + return ents + } + return nil +} + +// hasNextEnts returns if there is any available entries for execution. This +// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). +func (l *raftLog) hasNextEnts() bool { + off := max(l.applied+1, l.firstIndex()) + return l.committed+1 > off +} + +func (l *raftLog) snapshot() (pb.Snapshot, error) { + if l.unstable.snapshot != nil { + return *l.unstable.snapshot, nil + } + return l.storage.Snapshot() +} + +func (l *raftLog) firstIndex() uint64 { + if i, ok := l.unstable.maybeFirstIndex(); ok { + return i + } + index, err := l.storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return index +} + +func (l *raftLog) lastIndex() uint64 { + if i, ok := l.unstable.maybeLastIndex(); ok { + return i + } + i, err := l.storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return i +} + +func (l *raftLog) commitTo(tocommit uint64) { + // never decrease commit + if l.committed < tocommit { + if l.lastIndex() < tocommit { + l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. 
Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) + } + l.committed = tocommit + } +} + +func (l *raftLog) appliedTo(i uint64) { + if i == 0 { + return + } + if l.committed < i || i < l.applied { + l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) + } + l.applied = i +} + +func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } + +func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } + +func (l *raftLog) lastTerm() uint64 { + t, err := l.term(l.lastIndex()) + if err != nil { + l.logger.Panicf("unexpected error when getting the last term (%v)", err) + } + return t +} + +func (l *raftLog) term(i uint64) (uint64, error) { + // the valid term range is [index of dummy entry, last index] + dummyIndex := l.firstIndex() - 1 + if i < dummyIndex || i > l.lastIndex() { + // TODO: return an error instead? + return 0, nil + } + + if t, ok := l.unstable.maybeTerm(i); ok { + return t, nil + } + + t, err := l.storage.Term(i) + if err == nil { + return t, nil + } + if err == ErrCompacted || err == ErrUnavailable { + return 0, err + } + panic(err) // TODO(bdarnell) +} + +func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { + if i > l.lastIndex() { + return nil, nil + } + return l.slice(i, l.lastIndex()+1, maxsize) +} + +// allEntries returns all entries in the log. +func (l *raftLog) allEntries() []pb.Entry { + ents, err := l.entries(l.firstIndex(), noLimit) + if err == nil { + return ents + } + if err == ErrCompacted { // try again if there was a racing compaction + return l.allEntries() + } + // TODO (xiangli): handle error? + panic(err) +} + +// isUpToDate determines if the given (lastIndex,term) log is more up-to-date +// by comparing the index and term of the last entries in the existing logs. +// If the logs have last entries with different terms, then the log with the +// later term is more up-to-date. If the logs end with the same term, then +// whichever log has the larger lastIndex is more up-to-date. If the logs are +// the same, the given log is up-to-date. +func (l *raftLog) isUpToDate(lasti, term uint64) bool { + return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) +} + +func (l *raftLog) matchTerm(i, term uint64) bool { + t, err := l.term(i) + if err != nil { + return false + } + return t == term +} + +func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { + if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { + l.commitTo(maxIndex) + return true + } + return false +} + +func (l *raftLog) restore(s pb.Snapshot) { + l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) + l.committed = s.Metadata.Index + l.unstable.restore(s) +} + +// slice returns a slice of log entries from lo through hi-1, inclusive. 
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { + err := l.mustCheckOutOfBounds(lo, hi) + if err != nil { + return nil, err + } + if lo == hi { + return nil, nil + } + var ents []pb.Entry + if lo < l.unstable.offset { + storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) + if err == ErrCompacted { + return nil, err + } else if err == ErrUnavailable { + l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) + } else if err != nil { + panic(err) // TODO(bdarnell) + } + + // check if ents has reached the size limitation + if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { + return storedEnts, nil + } + + ents = storedEnts + } + if hi > l.unstable.offset { + unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) + if len(ents) > 0 { + combined := make([]pb.Entry, len(ents)+len(unstable)) + n := copy(combined, ents) + copy(combined[n:], unstable) + ents = combined + } else { + ents = unstable + } + } + return limitSize(ents, maxSize), nil +} + +// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) +func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { + if lo > hi { + l.logger.Panicf("invalid slice %d > %d", lo, hi) + } + fi := l.firstIndex() + if lo < fi { + return ErrCompacted + } + + length := l.lastIndex() + 1 - fi + if lo < fi || hi > fi+length { + l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) + } + return nil +} + +func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { + if err == nil { + return t + } + if err == ErrCompacted { + return 0 + } + l.logger.Panicf("unexpected error (%v)", err) + return 0 +} diff --git a/vendor/go.etcd.io/etcd/raft/log_unstable.go b/vendor/go.etcd.io/etcd/raft/log_unstable.go new file mode 100644 index 0000000000..1bff5a7bdc --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/log_unstable.go @@ -0,0 +1,157 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "go.etcd.io/etcd/raft/raftpb" + +// unstable.entries[i] has raft log position i+unstable.offset. +// Note that unstable.offset may be less than the highest log +// position in storage; this means that the next write to storage +// might need to truncate the log before persisting unstable.entries. +type unstable struct { + // the incoming unstable snapshot, if any. + snapshot *pb.Snapshot + // all entries that have not yet been written to storage. + entries []pb.Entry + offset uint64 + + logger Logger +} + +// maybeFirstIndex returns the index of the first possible entry in entries +// if it has a snapshot. +func (u *unstable) maybeFirstIndex() (uint64, bool) { + if u.snapshot != nil { + return u.snapshot.Metadata.Index + 1, true + } + return 0, false +} + +// maybeLastIndex returns the last index if it has at least one +// unstable entry or snapshot. 
+func (u *unstable) maybeLastIndex() (uint64, bool) { + if l := len(u.entries); l != 0 { + return u.offset + uint64(l) - 1, true + } + if u.snapshot != nil { + return u.snapshot.Metadata.Index, true + } + return 0, false +} + +// maybeTerm returns the term of the entry at index i, if there +// is any. +func (u *unstable) maybeTerm(i uint64) (uint64, bool) { + if i < u.offset { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + return u.snapshot.Metadata.Term, true + } + return 0, false + } + + last, ok := u.maybeLastIndex() + if !ok { + return 0, false + } + if i > last { + return 0, false + } + + return u.entries[i-u.offset].Term, true +} + +func (u *unstable) stableTo(i, t uint64) { + gt, ok := u.maybeTerm(i) + if !ok { + return + } + // if i < offset, term is matched with the snapshot + // only update the unstable entries if term is matched with + // an unstable entry. + if gt == t && i >= u.offset { + u.entries = u.entries[i+1-u.offset:] + u.offset = i + 1 + u.shrinkEntriesArray() + } +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. + const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries + } +} + +func (u *unstable) stableSnapTo(i uint64) { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + u.snapshot = nil + } +} + +func (u *unstable) restore(s pb.Snapshot) { + u.offset = s.Metadata.Index + 1 + u.entries = nil + u.snapshot = &s +} + +func (u *unstable) truncateAndAppend(ents []pb.Entry) { + after := ents[0].Index + switch { + case after == u.offset+uint64(len(u.entries)): + // after is the next index in the u.entries + // directly append + u.entries = append(u.entries, ents...) + case after <= u.offset: + u.logger.Infof("replace the unstable entries from index %d", after) + // The log is being truncated to before our current offset + // portion, so set the offset and replace the entries + u.offset = after + u.entries = ents + default: + // truncate to after and copy to u.entries + // then append + u.logger.Infof("truncate the unstable entries before index %d", after) + u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...) + u.entries = append(u.entries, ents...) 
+ } +} + +func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { + u.mustCheckOutOfBounds(lo, hi) + return u.entries[lo-u.offset : hi-u.offset] +} + +// u.offset <= lo <= hi <= u.offset+len(u.entries) +func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { + if lo > hi { + u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) + } + upper := u.offset + uint64(len(u.entries)) + if lo < u.offset || hi > upper { + u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper) + } +} diff --git a/vendor/go.etcd.io/etcd/raft/logger.go b/vendor/go.etcd.io/etcd/raft/logger.go new file mode 100644 index 0000000000..6d89629650 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/logger.go @@ -0,0 +1,132 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "sync" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func SetLogger(l Logger) { + raftLoggerMu.Lock() + raftLogger = l + raftLoggerMu.Unlock() +} + +var ( + defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} + discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} + raftLoggerMu sync.Mutex + raftLogger = Logger(defaultLogger) +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. 
+type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) +} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/go.etcd.io/etcd/raft/node.go b/vendor/go.etcd.io/etcd/raft/node.go new file mode 100644 index 0000000000..ab6185b99e --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/node.go @@ -0,0 +1,584 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "context" + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +type SnapshotStatus int + +const ( + SnapshotFinish SnapshotStatus = 1 + SnapshotFailure SnapshotStatus = 2 +) + +var ( + emptyState = pb.HardState{} + + // ErrStopped is returned by methods on Nodes that have been stopped. + ErrStopped = errors.New("raft: stopped") +) + +// SoftState provides state that is useful for logging and debugging. +// The state is volatile and does not need to be persisted to the WAL. +type SoftState struct { + Lead uint64 // must use atomic operations to access; keep 64-bit aligned. + RaftState StateType +} + +func (a *SoftState) equal(b *SoftState) bool { + return a.Lead == b.Lead && a.RaftState == b.RaftState +} + +// Ready encapsulates the entries and messages that are ready to read, +// be saved to stable storage, committed or sent to other peers. +// All fields in Ready are read-only. 
+type Ready struct { + // The current volatile state of a Node. + // SoftState will be nil if there is no update. + // It is not required to consume or store SoftState. + *SoftState + + // The current state of a Node to be saved to stable storage BEFORE + // Messages are sent. + // HardState will be equal to empty state if there is no update. + pb.HardState + + // ReadStates can be used for node to serve linearizable read requests locally + // when its applied index is greater than the index in ReadState. + // Note that the readState will be returned when raft receives msgReadIndex. + // The returned is only valid for the request that requested to read. + ReadStates []ReadState + + // Entries specifies entries to be saved to stable storage BEFORE + // Messages are sent. + Entries []pb.Entry + + // Snapshot specifies the snapshot to be saved to stable storage. + Snapshot pb.Snapshot + + // CommittedEntries specifies entries to be committed to a + // store/state-machine. These have previously been committed to stable + // store. + CommittedEntries []pb.Entry + + // Messages specifies outbound messages to be sent AFTER Entries are + // committed to stable storage. + // If it contains a MsgSnap message, the application MUST report back to raft + // when the snapshot has been received or has failed by calling ReportSnapshot. + Messages []pb.Message + + // MustSync indicates whether the HardState and Entries must be synchronously + // written to disk or if an asynchronous write is permissible. + MustSync bool +} + +func isHardStateEqual(a, b pb.HardState) bool { + return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit +} + +// IsEmptyHardState returns true if the given HardState is empty. +func IsEmptyHardState(st pb.HardState) bool { + return isHardStateEqual(st, emptyState) +} + +// IsEmptySnap returns true if the given Snapshot is empty. +func IsEmptySnap(sp pb.Snapshot) bool { + return sp.Metadata.Index == 0 +} + +func (rd Ready) containsUpdates() bool { + return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) || + !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 || + len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 +} + +// appliedCursor extracts from the Ready the highest index the client has +// applied (once the Ready is confirmed via Advance). If no information is +// contained in the Ready, returns zero. +func (rd Ready) appliedCursor() uint64 { + if n := len(rd.CommittedEntries); n > 0 { + return rd.CommittedEntries[n-1].Index + } + if index := rd.Snapshot.Metadata.Index; index > 0 { + return index + } + return 0 +} + +// Node represents a node in a raft cluster. +type Node interface { + // Tick increments the internal logical clock for the Node by a single tick. Election + // timeouts and heartbeat timeouts are in units of ticks. + Tick() + // Campaign causes the Node to transition to candidate state and start campaigning to become leader. + Campaign(ctx context.Context) error + // Propose proposes that data be appended to the log. Note that proposals can be lost without + // notice, therefore it is user's job to ensure proposal retries. + Propose(ctx context.Context, data []byte) error + // ProposeConfChange proposes a configuration change. Like any proposal, the + // configuration change may be dropped with or without an error being + // returned. In particular, configuration changes are dropped unless the + // leader has certainty that there is no prior unapplied configuration + // change in its log. 
+ // + // The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2 + // message. The latter allows arbitrary configuration changes via joint + // consensus, notably including replacing a voter. Passing a ConfChangeV2 + // message is only allowed if all Nodes participating in the cluster run a + // version of this library aware of the V2 API. See pb.ConfChangeV2 for + // usage details and semantics. + ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error + + // Step advances the state machine using the given message. ctx.Err() will be returned, if any. + Step(ctx context.Context, msg pb.Message) error + + // Ready returns a channel that returns the current point-in-time state. + // Users of the Node must call Advance after retrieving the state returned by Ready. + // + // NOTE: No committed entries from the next Ready may be applied until all committed entries + // and snapshots from the previous one have finished. + Ready() <-chan Ready + + // Advance notifies the Node that the application has saved progress up to the last Ready. + // It prepares the node to return the next available Ready. + // + // The application should generally call Advance after it applies the entries in last Ready. + // + // However, as an optimization, the application may call Advance while it is applying the + // commands. For example. when the last Ready contains a snapshot, the application might take + // a long time to apply the snapshot data. To continue receiving Ready without blocking raft + // progress, it can call Advance before finishing applying the last ready. + Advance() + // ApplyConfChange applies a config change (previously passed to + // ProposeConfChange) to the node. This must be called whenever a config + // change is observed in Ready.CommittedEntries. + // + // Returns an opaque non-nil ConfState protobuf which must be recorded in + // snapshots. + ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState + + // TransferLeadership attempts to transfer leadership to the given transferee. + TransferLeadership(ctx context.Context, lead, transferee uint64) + + // ReadIndex request a read state. The read state will be set in the ready. + // Read state has a read index. Once the application advances further than the read + // index, any linearizable read requests issued before the read request can be + // processed safely. The read state will have the same rctx attached. + ReadIndex(ctx context.Context, rctx []byte) error + + // Status returns the current status of the raft state machine. + Status() Status + // ReportUnreachable reports the given node is not reachable for the last send. + ReportUnreachable(id uint64) + // ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower + // who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure. + // Calling ReportSnapshot with SnapshotFinish is a no-op. But, any failure in applying a + // snapshot (for e.g., while streaming it from leader to follower), should be reported to the + // leader with SnapshotFailure. When leader sends a snapshot to a follower, it pauses any raft + // log probes until the follower can apply the snapshot and advance its state. If the follower + // can't do that, for e.g., due to a crash, it could end up in a limbo, never getting any + // updates from the leader. 
Therefore, it is crucial that the application ensures that any + // failure in snapshot sending is caught and reported back to the leader; so it can resume raft + // log probing in the follower. + ReportSnapshot(id uint64, status SnapshotStatus) + // Stop performs any necessary termination of the Node. + Stop() +} + +type Peer struct { + ID uint64 + Context []byte +} + +// StartNode returns a new Node given configuration and a list of raft peers. +// It appends a ConfChangeAddNode entry for each given peer to the initial log. +// +// Peers must not be zero length; call RestartNode in that case. +func StartNode(c *Config, peers []Peer) Node { + if len(peers) == 0 { + panic("no peers given; use RestartNode instead") + } + rn, err := NewRawNode(c) + if err != nil { + panic(err) + } + rn.Bootstrap(peers) + + n := newNode(rn) + + go n.run() + return &n +} + +// RestartNode is similar to StartNode but does not take a list of peers. +// The current membership of the cluster will be restored from the Storage. +// If the caller has an existing state machine, pass in the last log index that +// has been applied to it; otherwise use zero. +func RestartNode(c *Config) Node { + rn, err := NewRawNode(c) + if err != nil { + panic(err) + } + n := newNode(rn) + go n.run() + return &n +} + +type msgWithResult struct { + m pb.Message + result chan error +} + +// node is the canonical implementation of the Node interface +type node struct { + propc chan msgWithResult + recvc chan pb.Message + confc chan pb.ConfChangeV2 + confstatec chan pb.ConfState + readyc chan Ready + advancec chan struct{} + tickc chan struct{} + done chan struct{} + stop chan struct{} + status chan chan Status + + rn *RawNode +} + +func newNode(rn *RawNode) node { + return node{ + propc: make(chan msgWithResult), + recvc: make(chan pb.Message), + confc: make(chan pb.ConfChangeV2), + confstatec: make(chan pb.ConfState), + readyc: make(chan Ready), + advancec: make(chan struct{}), + // make tickc a buffered chan, so raft node can buffer some ticks when the node + // is busy processing raft messages. Raft node will resume process buffered + // ticks when it becomes idle. + tickc: make(chan struct{}, 128), + done: make(chan struct{}), + stop: make(chan struct{}), + status: make(chan chan Status), + rn: rn, + } +} + +func (n *node) Stop() { + select { + case n.stop <- struct{}{}: + // Not already stopped, so trigger it + case <-n.done: + // Node has already been stopped - no need to do anything + return + } + // Block until the stop has been acknowledged by run() + <-n.done +} + +func (n *node) run() { + var propc chan msgWithResult + var readyc chan Ready + var advancec chan struct{} + var rd Ready + + r := n.rn.raft + + lead := None + + for { + if advancec != nil { + readyc = nil + } else if n.rn.HasReady() { + // Populate a Ready. Note that this Ready is not guaranteed to + // actually be handled. We will arm readyc, but there's no guarantee + // that we will actually send on it. It's possible that we will + // service another channel instead, loop around, and then populate + // the Ready again. We could instead force the previous Ready to be + // handled first, but it's generally good to emit larger Readys plus + // it simplifies testing (by emitting less frequently and more + // predictably). 
+ rd = n.rn.readyWithoutAccept() + readyc = n.readyc + } + + if lead != r.lead { + if r.hasLeader() { + if lead == None { + r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term) + } else { + r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term) + } + propc = n.propc + } else { + r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term) + propc = nil + } + lead = r.lead + } + + select { + // TODO: maybe buffer the config propose if there exists one (the way + // described in raft dissertation) + // Currently it is dropped in Step silently. + case pm := <-propc: + m := pm.m + m.From = r.id + err := r.Step(m) + if pm.result != nil { + pm.result <- err + close(pm.result) + } + case m := <-n.recvc: + // filter out response message from unknown From. + if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { + r.Step(m) + } + case cc := <-n.confc: + _, okBefore := r.prs.Progress[r.id] + cs := r.applyConfChange(cc) + // If the node was removed, block incoming proposals. Note that we + // only do this if the node was in the config before. Nodes may be + // a member of the group without knowing this (when they're catching + // up on the log and don't have the latest config) and we don't want + // to block the proposal channel in that case. + // + // NB: propc is reset when the leader changes, which, if we learn + // about it, sort of implies that we got readded, maybe? This isn't + // very sound and likely has bugs. + if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter { + var found bool + for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} { + for _, id := range sl { + if id == r.id { + found = true + } + } + } + if !found { + propc = nil + } + } + select { + case n.confstatec <- cs: + case <-n.done: + } + case <-n.tickc: + n.rn.Tick() + case readyc <- rd: + n.rn.acceptReady(rd) + advancec = n.advancec + case <-advancec: + n.rn.Advance(rd) + rd = Ready{} + advancec = nil + case c := <-n.status: + c <- getStatus(r) + case <-n.stop: + close(n.done) + return + } + } +} + +// Tick increments the internal logical clock for this Node. Election timeouts +// and heartbeat timeouts are in units of ticks. +func (n *node) Tick() { + select { + case n.tickc <- struct{}{}: + case <-n.done: + default: + n.rn.raft.logger.Warningf("%x (leader %v) A tick missed to fire. Node blocks too long!", n.rn.raft.id, n.rn.raft.id == n.rn.raft.lead) + } +} + +func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } + +func (n *node) Propose(ctx context.Context, data []byte) error { + return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) +} + +func (n *node) Step(ctx context.Context, m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + // TODO: return an error? 
+ return nil + } + return n.step(ctx, m) +} + +func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) { + typ, data, err := pb.MarshalConfChange(c) + if err != nil { + return pb.Message{}, err + } + return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil +} + +func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error { + msg, err := confChangeToMsg(cc) + if err != nil { + return err + } + return n.Step(ctx, msg) +} + +func (n *node) step(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, false) +} + +func (n *node) stepWait(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, true) +} + +// Step advances the state machine using msgs. The ctx.Err() will be returned, +// if any. +func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error { + if m.Type != pb.MsgProp { + select { + case n.recvc <- m: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + } + ch := n.propc + pm := msgWithResult{m: m} + if wait { + pm.result = make(chan error, 1) + } + select { + case ch <- pm: + if !wait { + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + select { + case err := <-pm.result: + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + return nil +} + +func (n *node) Ready() <-chan Ready { return n.readyc } + +func (n *node) Advance() { + select { + case n.advancec <- struct{}{}: + case <-n.done: + } +} + +func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { + var cs pb.ConfState + select { + case n.confc <- cc.AsV2(): + case <-n.done: + } + select { + case cs = <-n.confstatec: + case <-n.done: + } + return &cs +} + +func (n *node) Status() Status { + c := make(chan Status) + select { + case n.status <- c: + return <-c + case <-n.done: + return Status{} + } +} + +func (n *node) ReportUnreachable(id uint64) { + select { + case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: + case <-n.done: + } +} + +func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + select { + case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: + case <-n.done: + } +} + +func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) { + select { + // manually set 'from' and 'to', so that leader can voluntarily transfers its leadership + case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}: + case <-n.done: + case <-ctx.Done(): + } +} + +func (n *node) ReadIndex(ctx context.Context, rctx []byte) error { + return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} + +func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { + rd := Ready{ + Entries: r.raftLog.unstableEntries(), + CommittedEntries: r.raftLog.nextEnts(), + Messages: r.msgs, + } + if softSt := r.softState(); !softSt.equal(prevSoftSt) { + rd.SoftState = softSt + } + if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { + rd.HardState = hardSt + } + if r.raftLog.unstable.snapshot != nil { + rd.Snapshot = *r.raftLog.unstable.snapshot + } + if len(r.readStates) != 0 { + rd.ReadStates = r.readStates + } + rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries)) + return rd +} + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a 
synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/joint.go b/vendor/go.etcd.io/etcd/raft/quorum/joint.go new file mode 100644 index 0000000000..e3741e0b0a --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/joint.go @@ -0,0 +1,75 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +// JointConfig is a configuration of two groups of (possibly overlapping) +// majority configurations. Decisions require the support of both majorities. +type JointConfig [2]MajorityConfig + +func (c JointConfig) String() string { + if len(c[1]) > 0 { + return c[0].String() + "&&" + c[1].String() + } + return c[0].String() +} + +// IDs returns a newly initialized map representing the set of voters present +// in the joint configuration. +func (c JointConfig) IDs() map[uint64]struct{} { + m := map[uint64]struct{}{} + for _, cc := range c { + for id := range cc { + m[id] = struct{}{} + } + } + return m +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c JointConfig) Describe(l AckedIndexer) string { + return MajorityConfig(c.IDs()).Describe(l) +} + +// CommittedIndex returns the largest committed index for the given joint +// quorum. An index is jointly committed if it is committed in both constituent +// majorities. +func (c JointConfig) CommittedIndex(l AckedIndexer) Index { + idx0 := c[0].CommittedIndex(l) + idx1 := c[1].CommittedIndex(l) + if idx0 < idx1 { + return idx0 + } + return idx1 +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending, lost, or won. A joint quorum +// requires both majority quorums to vote in favor. +func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult { + r1 := c[0].VoteResult(votes) + r2 := c[1].VoteResult(votes) + + if r1 == r2 { + // If they agree, return the agreed state. + return r1 + } + if r1 == VoteLost || r2 == VoteLost { + // If either config has lost, loss is the only possible outcome. + return VoteLost + } + // One side won, the other one is pending, so the whole outcome is. + return VotePending +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/majority.go b/vendor/go.etcd.io/etcd/raft/quorum/majority.go new file mode 100644 index 0000000000..8858a36b63 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/majority.go @@ -0,0 +1,210 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "fmt" + "math" + "sort" + "strings" +) + +// MajorityConfig is a set of IDs that uses majority quorums to make decisions. +type MajorityConfig map[uint64]struct{} + +func (c MajorityConfig) String() string { + sl := make([]uint64, 0, len(c)) + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + var buf strings.Builder + buf.WriteByte('(') + for i := range sl { + if i > 0 { + buf.WriteByte(' ') + } + fmt.Fprint(&buf, sl[i]) + } + buf.WriteByte(')') + return buf.String() +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c MajorityConfig) Describe(l AckedIndexer) string { + if len(c) == 0 { + return "" + } + type tup struct { + id uint64 + idx Index + ok bool // idx found? + bar int // length of bar displayed for this tup + } + + // Below, populate .bar so that the i-th largest commit index has bar i (we + // plot this as sort of a progress bar). The actual code is a bit more + // complicated and also makes sure that equal index => equal bar. + + n := len(c) + info := make([]tup, 0, n) + for id := range c { + idx, ok := l.AckedIndex(id) + info = append(info, tup{id: id, idx: idx, ok: ok}) + } + + // Sort by index + sort.Slice(info, func(i, j int) bool { + if info[i].idx == info[j].idx { + return info[i].id < info[j].id + } + return info[i].idx < info[j].idx + }) + + // Populate .bar. + for i := range info { + if i > 0 && info[i-1].idx < info[i].idx { + info[i].bar = i + } + } + + // Sort by ID. + sort.Slice(info, func(i, j int) bool { + return info[i].id < info[j].id + }) + + var buf strings.Builder + + // Print. + fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n") + for i := range info { + bar := info[i].bar + if !info[i].ok { + fmt.Fprint(&buf, "?"+strings.Repeat(" ", n)) + } else { + fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar)) + } + fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id) + } + return buf.String() +} + +// Slice returns the MajorityConfig as a sorted slice. +func (c MajorityConfig) Slice() []uint64 { + var sl []uint64 + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + return sl +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// CommittedIndex computes the committed index from those supplied via the +// provided AckedIndexer (for the active config). +func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index { + n := len(c) + if n == 0 { + // This plays well with joint quorums which, when one half is the zero + // MajorityConfig, should behave like the other half. + return math.MaxUint64 + } + + // Use an on-stack slice to collect the committed indexes when n <= 7 + // (otherwise we alloc). The alternative is to stash a slice on + // MajorityConfig, but this impairs usability (as is, MajorityConfig is just + // a map, and that's nice). 
The assumption is that running with a + // replication factor of >7 is rare, and in cases in which it happens + // performance is a lesser concern (additionally the performance + // implications of an allocation here are far from drastic). + var stk [7]uint64 + var srt []uint64 + if len(stk) >= n { + srt = stk[:n] + } else { + srt = make([]uint64, n) + } + + { + // Fill the slice with the indexes observed. Any unused slots will be + // left as zero; these correspond to voters that may report in, but + // haven't yet. We fill from the right (since the zeroes will end up on + // the left after sorting below anyway). + i := n - 1 + for id := range c { + if idx, ok := l.AckedIndex(id); ok { + srt[i] = uint64(idx) + i-- + } + } + } + + // Sort by index. Use a bespoke algorithm (copied from the stdlib's sort + // package) to keep srt on the stack. + insertionSort(srt) + + // The smallest index into the array for which the value is acked by a + // quorum. In other words, from the end of the slice, move n/2+1 to the + // left (accounting for zero-indexing). + pos := n - (n/2 + 1) + return Index(srt[pos]) +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending (i.e. neither a quorum of +// yes/no has been reached), won (a quorum of yes has been reached), or lost (a +// quorum of no has been reached). +func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult { + if len(c) == 0 { + // By convention, the elections on an empty config win. This comes in + // handy with joint quorums because it'll make a half-populated joint + // quorum behave like a majority quorum. + return VoteWon + } + + ny := [2]int{} // vote counts for no and yes, respectively + + var missing int + for id := range c { + v, ok := votes[id] + if !ok { + missing++ + continue + } + if v { + ny[1]++ + } else { + ny[0]++ + } + } + + q := len(c)/2 + 1 + if ny[1] >= q { + return VoteWon + } + if ny[1]+missing >= q { + return VotePending + } + return VoteLost +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/quorum.go b/vendor/go.etcd.io/etcd/raft/quorum/quorum.go new file mode 100644 index 0000000000..2899e46c96 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/quorum.go @@ -0,0 +1,58 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "math" + "strconv" +) + +// Index is a Raft log position. +type Index uint64 + +func (i Index) String() string { + if i == math.MaxUint64 { + return "∞" + } + return strconv.FormatUint(uint64(i), 10) +} + +// AckedIndexer allows looking up a commit index for a given ID of a voter +// from a corresponding MajorityConfig. +type AckedIndexer interface { + AckedIndex(voterID uint64) (idx Index, found bool) +} + +type mapAckIndexer map[uint64]Index + +func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) { + idx, ok := m[id] + return idx, ok +} + +// VoteResult indicates the outcome of a vote. 
+// +//go:generate stringer -type=VoteResult +type VoteResult uint8 + +const ( + // VotePending indicates that the decision of the vote depends on future + // votes, i.e. neither "yes" or "no" has reached quorum yet. + VotePending VoteResult = 1 + iota + // VoteLost indicates that the quorum has voted "no". + VoteLost + // VoteWon indicates that the quorum has voted "yes". + VoteWon +) diff --git a/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go b/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go new file mode 100644 index 0000000000..9eca8fd0c9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=VoteResult"; DO NOT EDIT. + +package quorum + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VotePending-1] + _ = x[VoteLost-2] + _ = x[VoteWon-3] +} + +const _VoteResult_name = "VotePendingVoteLostVoteWon" + +var _VoteResult_index = [...]uint8{0, 11, 19, 26} + +func (i VoteResult) String() string { + i -= 1 + if i >= VoteResult(len(_VoteResult_index)-1) { + return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]] +} diff --git a/vendor/go.etcd.io/etcd/raft/raft.go b/vendor/go.etcd.io/etcd/raft/raft.go new file mode 100644 index 0000000000..d3c3f42574 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raft.go @@ -0,0 +1,1656 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "errors" + "fmt" + "math" + "math/rand" + "sort" + "strings" + "sync" + "time" + + "go.etcd.io/etcd/raft/confchange" + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// None is a placeholder node ID used when there is no leader. +const None uint64 = 0 +const noLimit = math.MaxUint64 + +// Possible values for StateType. +const ( + StateFollower StateType = iota + StateCandidate + StateLeader + StatePreCandidate + numStates +) + +type ReadOnlyOption int + +const ( + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + ReadOnlySafe ReadOnlyOption = iota + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + ReadOnlyLeaseBased +) + +// Possible values for CampaignType +const ( + // campaignPreElection represents the first phase of a normal election when + // Config.PreVote is true. 
+ campaignPreElection CampaignType = "CampaignPreElection" + // campaignElection represents a normal (time-based) election (the second phase + // of the election when Config.PreVote is true). + campaignElection CampaignType = "CampaignElection" + // campaignTransfer represents the type of leader transfer + campaignTransfer CampaignType = "CampaignTransfer" +) + +// ErrProposalDropped is returned when the proposal is ignored by some cases, +// so that the proposer can be notified and fail fast. +var ErrProposalDropped = errors.New("raft proposal dropped") + +// lockedRand is a small wrapper around rand.Rand to provide +// synchronization among multiple raft groups. Only the methods needed +// by the code are exposed (e.g. Intn). +type lockedRand struct { + mu sync.Mutex + rand *rand.Rand +} + +func (r *lockedRand) Intn(n int) int { + r.mu.Lock() + v := r.rand.Intn(n) + r.mu.Unlock() + return v +} + +var globalRand = &lockedRand{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// CampaignType represents the type of campaigning +// the reason we use the type of string instead of uint64 +// is because it's simpler to compare and fill in raft entries +type CampaignType string + +// StateType represents the role of a node in a cluster. +type StateType uint64 + +var stmap = [...]string{ + "StateFollower", + "StateCandidate", + "StateLeader", + "StatePreCandidate", +} + +func (st StateType) String() string { + return stmap[uint64(st)] +} + +// Config contains the parameters to start a raft. +type Config struct { + // ID is the identity of the local raft. ID cannot be 0. + ID uint64 + + // peers contains the IDs of all nodes (including self) in the raft cluster. It + // should only be set when starting a new raft cluster. Restarting raft from + // previous configuration will panic if peers is set. peer is private and only + // used for testing right now. + peers []uint64 + + // learners contains the IDs of all learner nodes (including self if the + // local node is a learner) in the raft cluster. learners only receives + // entries from the leader node. It does not vote or promote itself. + learners []uint64 + + // ElectionTick is the number of Node.Tick invocations that must pass between + // elections. That is, if a follower does not receive any message from the + // leader of current term before ElectionTick has elapsed, it will become + // candidate and start an election. ElectionTick must be greater than + // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid + // unnecessary leader switching. + ElectionTick int + // HeartbeatTick is the number of Node.Tick invocations that must pass between + // heartbeats. That is, a leader sends heartbeat messages to maintain its + // leadership every HeartbeatTick ticks. + HeartbeatTick int + + // Storage is the storage for raft. raft generates entries and states to be + // stored in storage. raft reads the persisted entries and states out of + // Storage when it needs. raft reads out the previous state and configuration + // out of storage when restarting. + Storage Storage + // Applied is the last applied index. It should only be set when restarting + // raft. raft will not return entries to the application smaller or equal to + // Applied. If Applied is unset when restarting, raft might return previous + // applied entries. This is a very application dependent configuration. + Applied uint64 + + // MaxSizePerMsg limits the max byte size of each append message. 
Smaller + // value lowers the raft recovery cost(initial probing and message lost + // during normal operation). On the other side, it might affect the + // throughput during normal replication. Note: math.MaxUint64 for unlimited, + // 0 for at most one entry per message. + MaxSizePerMsg uint64 + // MaxCommittedSizePerReady limits the size of the committed entries which + // can be applied. + MaxCommittedSizePerReady uint64 + // MaxUncommittedEntriesSize limits the aggregate byte size of the + // uncommitted entries that may be appended to a leader's log. Once this + // limit is exceeded, proposals will begin to return ErrProposalDropped + // errors. Note: 0 for no limit. + MaxUncommittedEntriesSize uint64 + // MaxInflightMsgs limits the max number of in-flight append messages during + // optimistic replication phase. The application transportation layer usually + // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid + // overflowing that sending buffer. TODO (xiangli): feedback to application to + // limit the proposal rate? + MaxInflightMsgs int + + // CheckQuorum specifies if the leader should check quorum activity. Leader + // steps down when quorum is not active for an electionTimeout. + CheckQuorum bool + + // PreVote enables the Pre-Vote algorithm described in raft thesis section + // 9.6. This prevents disruption when a node that has been partitioned away + // rejoins the cluster. + PreVote bool + + // ReadOnlyOption specifies how the read only request is processed. + // + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + // + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased. + ReadOnlyOption ReadOnlyOption + + // Logger is the logger used for raft log. For multinode which can host + // multiple raft group, each raft group can have its own logger + Logger Logger + + // DisableProposalForwarding set to true means that followers will drop + // proposals, rather than forwarding them to the leader. One use case for + // this feature would be in a situation where the Raft leader is used to + // compute the data of a proposal, for example, adding a timestamp from a + // hybrid logical clock to data in a monotonically increasing way. Forwarding + // should be disabled to prevent a follower with an inaccurate hybrid + // logical clock from assigning the timestamp and then forwarding the data + // to the leader. + DisableProposalForwarding bool +} + +func (c *Config) validate() error { + if c.ID == None { + return errors.New("cannot use none as id") + } + + if c.HeartbeatTick <= 0 { + return errors.New("heartbeat tick must be greater than 0") + } + + if c.ElectionTick <= c.HeartbeatTick { + return errors.New("election tick must be greater than heartbeat tick") + } + + if c.Storage == nil { + return errors.New("storage cannot be nil") + } + + if c.MaxUncommittedEntriesSize == 0 { + c.MaxUncommittedEntriesSize = noLimit + } + + // default MaxCommittedSizePerReady to MaxSizePerMsg because they were + // previously the same parameter. 
+ if c.MaxCommittedSizePerReady == 0 { + c.MaxCommittedSizePerReady = c.MaxSizePerMsg + } + + if c.MaxInflightMsgs <= 0 { + return errors.New("max inflight messages must be greater than 0") + } + + if c.Logger == nil { + c.Logger = raftLogger + } + + if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum { + return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased") + } + + return nil +} + +type raft struct { + id uint64 + + Term uint64 + Vote uint64 + + readStates []ReadState + + // the log + raftLog *raftLog + + maxMsgSize uint64 + maxUncommittedSize uint64 + // TODO(tbg): rename to trk. + prs tracker.ProgressTracker + + state StateType + + // isLearner is true if the local raft node is a learner. + isLearner bool + + msgs []pb.Message + + // the leader id + lead uint64 + // leadTransferee is id of the leader transfer target when its value is not zero. + // Follow the procedure defined in raft thesis 3.10. + leadTransferee uint64 + // Only one conf change may be pending (in the log, but not yet + // applied) at a time. This is enforced via pendingConfIndex, which + // is set to a value >= the log index of the latest pending + // configuration change (if any). Config changes are only allowed to + // be proposed if the leader's applied index is greater than this + // value. + pendingConfIndex uint64 + // an estimate of the size of the uncommitted tail of the Raft log. Used to + // prevent unbounded log growth. Only maintained by the leader. Reset on + // term changes. + uncommittedSize uint64 + + readOnly *readOnly + + // number of ticks since it reached last electionTimeout when it is leader + // or candidate. + // number of ticks since it reached last electionTimeout or received a + // valid message from current leader when it is a follower. + electionElapsed int + + // number of ticks since it reached last heartbeatTimeout. + // only leader keeps heartbeatElapsed. + heartbeatElapsed int + + checkQuorum bool + preVote bool + + heartbeatTimeout int + electionTimeout int + // randomizedElectionTimeout is a random number between + // [electiontimeout, 2 * electiontimeout - 1]. It gets reset + // when raft changes its state to follower or candidate. + randomizedElectionTimeout int + disableProposalForwarding bool + + tick func() + step stepFunc + + logger Logger +} + +func newRaft(c *Config) *raft { + if err := c.validate(); err != nil { + panic(err.Error()) + } + raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady) + hs, cs, err := c.Storage.InitialState() + if err != nil { + panic(err) // TODO(bdarnell) + } + + if len(c.peers) > 0 || len(c.learners) > 0 { + if len(cs.Voters) > 0 || len(cs.Learners) > 0 { + // TODO(bdarnell): the peers argument is always nil except in + // tests; the argument should be removed and these tests should be + // updated to specify their nodes through a snapshot. 
+ panic("cannot specify both newRaft(peers, learners) and ConfState.(Voters, Learners)") + } + cs.Voters = c.peers + cs.Learners = c.learners + } + + r := &raft{ + id: c.ID, + lead: None, + isLearner: false, + raftLog: raftlog, + maxMsgSize: c.MaxSizePerMsg, + maxUncommittedSize: c.MaxUncommittedEntriesSize, + prs: tracker.MakeProgressTracker(c.MaxInflightMsgs), + electionTimeout: c.ElectionTick, + heartbeatTimeout: c.HeartbeatTick, + logger: c.Logger, + checkQuorum: c.CheckQuorum, + preVote: c.PreVote, + readOnly: newReadOnly(c.ReadOnlyOption), + disableProposalForwarding: c.DisableProposalForwarding, + } + + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: raftlog.lastIndex(), + }, cs) + if err != nil { + panic(err) + } + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + + if !IsEmptyHardState(hs) { + r.loadState(hs) + } + if c.Applied > 0 { + raftlog.appliedTo(c.Applied) + } + r.becomeFollower(r.Term, None) + + var nodesStrs []string + for _, n := range r.prs.VoterNodes() { + nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) + } + + r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", + r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) + return r +} + +func (r *raft) hasLeader() bool { return r.lead != None } + +func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } + +func (r *raft) hardState() pb.HardState { + return pb.HardState{ + Term: r.Term, + Vote: r.Vote, + Commit: r.raftLog.committed, + } +} + +// send persists state to stable storage and then sends to its mailbox. +func (r *raft) send(m pb.Message) { + m.From = r.id + if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp { + if m.Term == 0 { + // All {pre-,}campaign messages need to have the term set when + // sending. + // - MsgVote: m.Term is the term the node is campaigning for, + // non-zero as we increment the term when campaigning. + // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was + // granted, non-zero for the same reason MsgVote is + // - MsgPreVote: m.Term is the term the node will campaign, + // non-zero as we use m.Term to indicate the next term we'll be + // campaigning for + // - MsgPreVoteResp: m.Term is the term received in the original + // MsgPreVote if the pre-vote was granted, non-zero for the + // same reasons MsgPreVote is + panic(fmt.Sprintf("term should be set when sending %s", m.Type)) + } + } else { + if m.Term != 0 { + panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term)) + } + // do not attach term to MsgProp, MsgReadIndex + // proposals are a way to forward to the leader and + // should be treated as local message. + // MsgReadIndex is also forwarded to leader. + if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex { + m.Term = r.Term + } + } + r.msgs = append(r.msgs, m) +} + +// sendAppend sends an append RPC with new entries (if any) and the +// current commit index to the given peer. +func (r *raft) sendAppend(to uint64) { + r.maybeSendAppend(to, true) +} + +// maybeSendAppend sends an append RPC with new entries to the given peer, +// if necessary. Returns true if a message was sent. 
The sendIfEmpty +// argument controls whether messages with no entries will be sent +// ("empty" messages are useful to convey updated Commit indexes, but +// are undesirable when we're sending multiple messages in a batch). +func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { + pr := r.prs.Progress[to] + if pr.IsPaused() { + return false + } + m := pb.Message{} + m.To = to + + term, errt := r.raftLog.term(pr.Next - 1) + ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + if len(ents) == 0 && !sendIfEmpty { + return false + } + + if errt != nil || erre != nil { // send snapshot if we failed to get term or entries + if !pr.RecentActive { + r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) + return false + } + + m.Type = pb.MsgSnap + snapshot, err := r.raftLog.snapshot() + if err != nil { + if err == ErrSnapshotTemporarilyUnavailable { + r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) + return false + } + panic(err) // TODO(bdarnell) + } + if IsEmptySnap(snapshot) { + panic("need non-empty snapshot") + } + m.Snapshot = snapshot + sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term + r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", + r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) + pr.BecomeSnapshot(sindex) + r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) + } else { + m.Type = pb.MsgApp + m.Index = pr.Next - 1 + m.LogTerm = term + m.Entries = ents + m.Commit = r.raftLog.committed + if n := len(m.Entries); n != 0 { + switch pr.State { + // optimistically increase the next when in StateReplicate + case tracker.StateReplicate: + last := m.Entries[n-1].Index + pr.OptimisticUpdate(last) + pr.Inflights.Add(last) + case tracker.StateProbe: + pr.ProbeSent = true + default: + r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) + } + } + } + r.send(m) + return true +} + +// sendHeartbeat sends a heartbeat RPC to the given peer. +func (r *raft) sendHeartbeat(to uint64, ctx []byte) { + // Attach the commit as min(to.matched, r.committed). + // When the leader sends out heartbeat message, + // the receiver(follower) might not be matched with the leader + // or it might not have all the committed entries. + // The leader MUST NOT forward the follower's commit to + // an unmatched index. + commit := min(r.prs.Progress[to].Match, r.raftLog.committed) + m := pb.Message{ + To: to, + Type: pb.MsgHeartbeat, + Commit: commit, + Context: ctx, + } + + r.send(m) +} + +// bcastAppend sends RPC, with entries to all peers that are not up-to-date +// according to the progress recorded in r.prs. +func (r *raft) bcastAppend() { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { + if id == r.id { + return + } + r.sendAppend(id) + }) +} + +// bcastHeartbeat sends RPC, without entries to all the peers. +func (r *raft) bcastHeartbeat() { + lastCtx := r.readOnly.lastPendingRequestCtx() + if len(lastCtx) == 0 { + r.bcastHeartbeatWithCtx(nil) + } else { + r.bcastHeartbeatWithCtx([]byte(lastCtx)) + } +} + +func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { + if id == r.id { + return + } + r.sendHeartbeat(id, ctx) + }) +} + +func (r *raft) advance(rd Ready) { + // If entries were applied (or a snapshot), update our cursor for + // the next Ready. 
Note that if the current HardState contains a + // new Commit index, this does not mean that we're also applying + // all of the new entries due to commit pagination by size. + if index := rd.appliedCursor(); index > 0 { + r.raftLog.appliedTo(index) + if r.prs.Config.AutoLeave && index >= r.pendingConfIndex && r.state == StateLeader { + // If the current (and most recent, at least for this leader's term) + // configuration should be auto-left, initiate that now. + ccdata, err := (&pb.ConfChangeV2{}).Marshal() + if err != nil { + panic(err) + } + ent := pb.Entry{ + Type: pb.EntryConfChangeV2, + Data: ccdata, + } + if !r.appendEntry(ent) { + // If we could not append the entry, bump the pending conf index + // so that we'll try again later. + // + // TODO(tbg): test this case. + r.pendingConfIndex = r.raftLog.lastIndex() + } else { + r.logger.Infof("initiating automatic transition out of joint configuration %s", r.prs.Config) + } + } + } + r.reduceUncommittedSize(rd.CommittedEntries) + + if len(rd.Entries) > 0 { + e := rd.Entries[len(rd.Entries)-1] + r.raftLog.stableTo(e.Index, e.Term) + } + if !IsEmptySnap(rd.Snapshot) { + r.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) + } +} + +// maybeCommit attempts to advance the commit index. Returns true if +// the commit index changed (in which case the caller should call +// r.bcastAppend). +func (r *raft) maybeCommit() bool { + mci := r.prs.Committed() + return r.raftLog.maybeCommit(mci, r.Term) +} + +func (r *raft) reset(term uint64) { + if r.Term != term { + r.Term = term + r.Vote = None + } + r.lead = None + + r.electionElapsed = 0 + r.heartbeatElapsed = 0 + r.resetRandomizedElectionTimeout() + + r.abortLeaderTransfer() + + r.prs.ResetVotes() + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + *pr = tracker.Progress{ + Match: 0, + Next: r.raftLog.lastIndex() + 1, + Inflights: tracker.NewInflights(r.prs.MaxInflight), + IsLearner: pr.IsLearner, + } + if id == r.id { + pr.Match = r.raftLog.lastIndex() + } + }) + + r.pendingConfIndex = 0 + r.uncommittedSize = 0 + r.readOnly = newReadOnly(r.readOnly.option) +} + +func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) { + li := r.raftLog.lastIndex() + for i := range es { + es[i].Term = r.Term + es[i].Index = li + 1 + uint64(i) + } + // Track the size of this uncommitted proposal. + if !r.increaseUncommittedSize(es) { + r.logger.Debugf( + "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal", + r.id, + ) + // Drop the proposal. + return false + } + // use latest "last" index after truncate/append + li = r.raftLog.append(es...) + r.prs.Progress[r.id].MaybeUpdate(li) + // Regardless of maybeCommit's return, our caller will call bcastAppend. + r.maybeCommit() + return true +} + +// tickElection is run by followers and candidates after r.electionTimeout. +func (r *raft) tickElection() { + r.electionElapsed++ + + if r.promotable() && r.pastElectionTimeout() { + r.electionElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) + } +} + +// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. +func (r *raft) tickHeartbeat() { + r.heartbeatElapsed++ + r.electionElapsed++ + + if r.electionElapsed >= r.electionTimeout { + r.electionElapsed = 0 + if r.checkQuorum { + r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + } + // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. 
+ if r.state == StateLeader && r.leadTransferee != None { + r.abortLeaderTransfer() + } + } + + if r.state != StateLeader { + return + } + + if r.heartbeatElapsed >= r.heartbeatTimeout { + r.heartbeatElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) + } +} + +func (r *raft) becomeFollower(term uint64, lead uint64) { + r.step = stepFollower + r.reset(term) + r.tick = r.tickElection + r.lead = lead + r.state = StateFollower + r.logger.Infof("%x became follower at term %d", r.id, r.Term) +} + +func (r *raft) becomeCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> candidate]") + } + r.step = stepCandidate + r.reset(r.Term + 1) + r.tick = r.tickElection + r.Vote = r.id + r.state = StateCandidate + r.logger.Infof("%x became candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomePreCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> pre-candidate]") + } + // Becoming a pre-candidate changes our step functions and state, + // but doesn't change anything else. In particular it does not increase + // r.Term or change r.Vote. + r.step = stepCandidate + r.prs.ResetVotes() + r.tick = r.tickElection + r.lead = None + r.state = StatePreCandidate + r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomeLeader() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateFollower { + panic("invalid transition [follower -> leader]") + } + r.step = stepLeader + r.reset(r.Term) + r.tick = r.tickHeartbeat + r.lead = r.id + r.state = StateLeader + // Followers enter replicate mode when they've been successfully probed + // (perhaps after having received a snapshot as a result). The leader is + // trivially in this state. Note that r.reset() has initialized this + // progress with the last index already. + r.prs.Progress[r.id].BecomeReplicate() + + // Conservatively set the pendingConfIndex to the last index in the + // log. There may or may not be a pending config change, but it's + // safe to delay any future proposals until we commit all our + // pending log entries, and scanning the entire tail of the log + // could be expensive. + r.pendingConfIndex = r.raftLog.lastIndex() + + emptyEnt := pb.Entry{Data: nil} + if !r.appendEntry(emptyEnt) { + // This won't happen because we just called reset() above. + r.logger.Panic("empty entry was dropped") + } + // As a special case, don't count the initial empty entry towards the + // uncommitted log quota. This is because we want to preserve the + // behavior of allowing one entry larger than quota if the current + // usage is zero. + r.reduceUncommittedSize([]pb.Entry{emptyEnt}) + r.logger.Infof("%x became leader at term %d", r.id, r.Term) +} + +// campaign transitions the raft instance to candidate state. This must only be +// called after verifying that this is a legitimate transition. +func (r *raft) campaign(t CampaignType) { + if !r.promotable() { + // This path should not be hit (callers are supposed to check), but + // better safe than sorry. + r.logger.Warningf("%x is unpromotable; campaign() should have been called", r.id) + } + var term uint64 + var voteMsg pb.MessageType + if t == campaignPreElection { + r.becomePreCandidate() + voteMsg = pb.MsgPreVote + // PreVote RPCs are sent for the next term before we've incremented r.Term. 
+ term = r.Term + 1 + } else { + r.becomeCandidate() + voteMsg = pb.MsgVote + term = r.Term + } + if _, _, res := r.poll(r.id, voteRespMsgType(voteMsg), true); res == quorum.VoteWon { + // We won the election after voting for ourselves (which must mean that + // this is a single-node cluster). Advance to the next state. + if t == campaignPreElection { + r.campaign(campaignElection) + } else { + r.becomeLeader() + } + return + } + var ids []uint64 + { + idMap := r.prs.Voters.IDs() + ids = make([]uint64, 0, len(idMap)) + for id := range idMap { + ids = append(ids, id) + } + sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) + } + for _, id := range ids { + if id == r.id { + continue + } + r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term) + + var ctx []byte + if t == campaignTransfer { + ctx = []byte(t) + } + r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) + } +} + +func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) { + if v { + r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term) + } else { + r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) + } + r.prs.RecordVote(id, v) + return r.prs.TallyVotes() +} + +func (r *raft) Step(m pb.Message) error { + // Handle the message term, which may result in our stepping down to a follower. + switch { + case m.Term == 0: + // local message + case m.Term > r.Term: + if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + force := bytes.Equal(m.Context, []byte(campaignTransfer)) + inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout + if !force && inLease { + // If a server receives a RequestVote request within the minimum election timeout + // of hearing from a current leader, it does not update its term or grant its vote + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed) + return nil + } + } + switch { + case m.Type == pb.MsgPreVote: + // Never change our term in response to a PreVote + case m.Type == pb.MsgPreVoteResp && !m.Reject: + // We send pre-vote requests with a term in our future. If the + // pre-vote is granted, we will increment our term when we get a + // quorum. If it is not, the term comes from the node that + // rejected our vote so we should become a follower at the new + // term. + default: + r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap { + r.becomeFollower(m.Term, m.From) + } else { + r.becomeFollower(m.Term, None) + } + } + + case m.Term < r.Term: + if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { + // We have received messages from a leader at a lower term. It is possible + // that these messages were simply delayed in the network, but this could + // also mean that this node has advanced its term number during a network + // partition, and it is now unable to either win an election or to rejoin + // the majority on the old term. 
If checkQuorum is false, this will be + // handled by incrementing term numbers in response to MsgVote with a + // higher term, but if checkQuorum is true we may not advance the term on + // MsgVote and must generate other messages to advance the term. The net + // result of these two features is to minimize the disruption caused by + // nodes that have been removed from the cluster's configuration: a + // removed node will send MsgVotes (or MsgPreVotes) which will be ignored, + // but it will not receive MsgApp or MsgHeartbeat, so it will not create + // disruptive term increases, by notifying leader of this node's activeness. + // The above comments also true for Pre-Vote + // + // When follower gets isolated, it soon starts an election ending + // up with a higher term than leader, although it won't receive enough + // votes to win the election. When it regains connectivity, this response + // with "pb.MsgAppResp" of higher term would force leader to step down. + // However, this disruption is inevitable to free this stuck node with + // fresh election. This can be prevented with Pre-Vote phase. + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) + } else if m.Type == pb.MsgPreVote { + // Before Pre-Vote enable, there may have candidate with higher term, + // but less log. After update to Pre-Vote, the cluster may deadlock if + // we drop messages with a lower term. + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true}) + } else { + // ignore other cases + r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + } + return nil + } + + switch m.Type { + case pb.MsgHup: + if r.state != StateLeader { + if !r.promotable() { + r.logger.Warningf("%x is unpromotable and can not campaign; ignoring MsgHup", r.id) + return nil + } + ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) + if err != nil { + r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) + } + if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied { + r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n) + return nil + } + + r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term) + if r.preVote { + r.campaign(campaignPreElection) + } else { + r.campaign(campaignElection) + } + } else { + r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) + } + + case pb.MsgVote, pb.MsgPreVote: + // We can vote if this is a repeat of a vote we've already cast... + canVote := r.Vote == m.From || + // ...we haven't voted and we don't think there's a leader yet in this term... + (r.Vote == None && r.lead == None) || + // ...or this is a PreVote for a future term... + (m.Type == pb.MsgPreVote && m.Term > r.Term) + // ...and we believe the candidate is up to date. + if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + // Note: it turns out that that learners must be allowed to cast votes. + // This seems counter- intuitive but is necessary in the situation in which + // a learner has been promoted (i.e. is now a voter) but has not learned + // about this yet. 
+ // For example, consider a group in which id=1 is a learner and id=2 and + // id=3 are voters. A configuration change promoting 1 can be committed on + // the quorum `{2,3}` without the config change being appended to the + // learner's log. If the leader (say 2) fails, there are de facto two + // voters remaining. Only 3 can win an election (due to its log containing + // all committed entries), but to do so it will need 1 to vote. But 1 + // considers itself a learner and will continue to do so until 3 has + // stepped up as leader, replicates the conf change to 1, and 1 applies it. + // Ultimately, by receiving a request to vote, the learner realizes that + // the candidate believes it to be a voter, and that it should act + // accordingly. The candidate's config may be stale, too; but in that case + // it won't win the election, at least in the absence of the bug discussed + // in: + // https://github.com/etcd-io/etcd/issues/7625#issuecomment-488798263. + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + // When responding to Msg{Pre,}Vote messages we include the term + // from the message, not the local term. To see why, consider the + // case where a single node was previously partitioned away and + // it's local term is now out of date. If we include the local term + // (recall that for pre-votes we don't update the local term), the + // (pre-)campaigning node on the other end will proceed to ignore + // the message (it ignores all out of date messages). + // The term in the original message and current local term are the + // same in the case of regular votes, but different for pre-votes. + r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)}) + if m.Type == pb.MsgVote { + // Only record real votes. + r.electionElapsed = 0 + r.Vote = m.From + } + } else { + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true}) + } + + default: + err := r.step(r, m) + if err != nil { + return err + } + } + return nil +} + +type stepFunc func(r *raft, m pb.Message) error + +func stepLeader(r *raft, m pb.Message) error { + // These message types do not require any progress for m.From. + switch m.Type { + case pb.MsgBeat: + r.bcastHeartbeat() + return nil + case pb.MsgCheckQuorum: + // The leader should always see itself as active. As a precaution, handle + // the case in which the leader isn't in the configuration any more (for + // example if it just removed itself). + // + // TODO(tbg): I added a TODO in removeNode, it doesn't seem that the + // leader steps down when removing itself. I might be missing something. + if pr := r.prs.Progress[r.id]; pr != nil { + pr.RecentActive = true + } + if !r.prs.QuorumActive() { + r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) + r.becomeFollower(r.Term, None) + } + // Mark everyone (but ourselves) as inactive in preparation for the next + // CheckQuorum. 
+ r.prs.Visit(func(id uint64, pr *tracker.Progress) { + if id != r.id { + pr.RecentActive = false + } + }) + return nil + case pb.MsgProp: + if len(m.Entries) == 0 { + r.logger.Panicf("%x stepped empty MsgProp", r.id) + } + if r.prs.Progress[r.id] == nil { + // If we are not currently a member of the range (i.e. this node + // was removed from the configuration while serving as leader), + // drop any new proposals. + return ErrProposalDropped + } + if r.leadTransferee != None { + r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee) + return ErrProposalDropped + } + + for i := range m.Entries { + e := &m.Entries[i] + var cc pb.ConfChangeI + if e.Type == pb.EntryConfChange { + var ccc pb.ConfChange + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } else if e.Type == pb.EntryConfChangeV2 { + var ccc pb.ConfChangeV2 + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } + if cc != nil { + alreadyPending := r.pendingConfIndex > r.raftLog.applied + alreadyJoint := len(r.prs.Config.Voters[1]) > 0 + wantsLeaveJoint := len(cc.AsV2().Changes) == 0 + + var refused string + if alreadyPending { + refused = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied) + } else if alreadyJoint && !wantsLeaveJoint { + refused = "must transition out of joint config first" + } else if !alreadyJoint && wantsLeaveJoint { + refused = "not in joint state; refusing empty conf change" + } + + if refused != "" { + r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.prs.Config, refused) + m.Entries[i] = pb.Entry{Type: pb.EntryNormal} + } else { + r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1 + } + } + } + + if !r.appendEntry(m.Entries...) { + return ErrProposalDropped + } + r.bcastAppend() + return nil + case pb.MsgReadIndex: + // If more than the local vote is needed, go through a full broadcast, + // otherwise optimize. + if !r.prs.IsSingleton() { + if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term { + // Reject read only request when this leader has not committed any log entry at its term. + return nil + } + + // thinking: use an interally defined context instead of the user given context. + // We can express this in terms of the term and index instead of a user-supplied value. + // This would allow multiple reads to piggyback on the same message. + switch r.readOnly.option { + case ReadOnlySafe: + r.readOnly.addRequest(r.raftLog.committed, m) + // The local node automatically acks the request. + r.readOnly.recvAck(r.id, m.Entries[0].Data) + r.bcastHeartbeatWithCtx(m.Entries[0].Data) + case ReadOnlyLeaseBased: + ri := r.raftLog.committed + if m.From == None || m.From == r.id { // from local member + r.readStates = append(r.readStates, ReadState{Index: ri, RequestCtx: m.Entries[0].Data}) + } else { + r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries}) + } + } + } else { // only one voting member (the leader) in the cluster + if m.From == None || m.From == r.id { // from leader itself + r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } else { // from learner member + r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: r.raftLog.committed, Entries: m.Entries}) + } + } + + return nil + } + + // All other message types require a progress for m.From (pr). 
+ pr := r.prs.Progress[m.From] + if pr == nil { + r.logger.Debugf("%x no progress available for %x", r.id, m.From) + return nil + } + switch m.Type { + case pb.MsgAppResp: + pr.RecentActive = true + + if m.Reject { + r.logger.Debugf("%x received MsgAppResp(MsgApp was rejected, lastindex: %d) from %x for index %d", + r.id, m.RejectHint, m.From, m.Index) + if pr.MaybeDecrTo(m.Index, m.RejectHint) { + r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() + } + r.sendAppend(m.From) + } + } else { + oldPaused := pr.IsPaused() + if pr.MaybeUpdate(m.Index) { + switch { + case pr.State == tracker.StateProbe: + pr.BecomeReplicate() + case pr.State == tracker.StateSnapshot && pr.Match >= pr.PendingSnapshot: + // TODO(tbg): we should also enter this branch if a snapshot is + // received that is below pr.PendingSnapshot but which makes it + // possible to use the log again. + r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + // Transition back to replicating state via probing state + // (which takes the snapshot into account). If we didn't + // move to replicating state, that would only happen with + // the next round of appends (but there may not be a next + // round for a while, exposing an inconsistent RaftStatus). + pr.BecomeProbe() + pr.BecomeReplicate() + case pr.State == tracker.StateReplicate: + pr.Inflights.FreeLE(m.Index) + } + + if r.maybeCommit() { + r.bcastAppend() + } else if oldPaused { + // If we were paused before, this node may be missing the + // latest commit index, so send it. + r.sendAppend(m.From) + } + // We've updated flow control information above, which may + // allow us to send multiple (size-limited) in-flight messages + // at once (such as when transitioning from probe to + // replicate, or when freeTo() covers multiple messages). If + // we have more entries to send, send as many messages as we + // can (without sending empty messages for the commit index) + for r.maybeSendAppend(m.From, false) { + } + // Transfer leadership is in progress. + if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { + r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) + r.sendTimeoutNow(m.From) + } + } + } + case pb.MsgHeartbeatResp: + pr.RecentActive = true + pr.ProbeSent = false + + // free one slot for the full inflights window to allow progress. + if pr.State == tracker.StateReplicate && pr.Inflights.Full() { + pr.Inflights.FreeFirstOne() + } + if pr.Match < r.raftLog.lastIndex() { + r.sendAppend(m.From) + } + + if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 { + return nil + } + + if r.prs.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon { + return nil + } + + rss := r.readOnly.advance(m) + for _, rs := range rss { + req := rs.req + if req.From == None || req.From == r.id { // from local member + r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data}) + } else { + r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries}) + } + } + case pb.MsgSnapStatus: + if pr.State != tracker.StateSnapshot { + return nil + } + // TODO(tbg): this code is very similar to the snapshot handling in + // MsgAppResp above. 
In fact, the code there is more correct than the + // code here and should likely be updated to match (or even better, the + // logic pulled into a newly created Progress state machine handler). + if !m.Reject { + pr.BecomeProbe() + r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } else { + // NB: the order here matters or we'll be probing erroneously from + // the snapshot index, but the snapshot never applied. + pr.PendingSnapshot = 0 + pr.BecomeProbe() + r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } + // If snapshot finish, wait for the MsgAppResp from the remote node before sending + // out the next MsgApp. + // If snapshot failure, wait for a heartbeat interval before next try + pr.ProbeSent = true + case pb.MsgUnreachable: + // During optimistic replication, if the remote becomes unreachable, + // there is huge probability that a MsgApp is lost. + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() + } + r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) + case pb.MsgTransferLeader: + if pr.IsLearner { + r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id) + return nil + } + leadTransferee := m.From + lastLeadTransferee := r.leadTransferee + if lastLeadTransferee != None { + if lastLeadTransferee == leadTransferee { + r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x", + r.id, r.Term, leadTransferee, leadTransferee) + return nil + } + r.abortLeaderTransfer() + r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee) + } + if leadTransferee == r.id { + r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id) + return nil + } + // Transfer leadership to third party. + r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee) + // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed. + r.electionElapsed = 0 + r.leadTransferee = leadTransferee + if pr.Match == r.raftLog.lastIndex() { + r.sendTimeoutNow(leadTransferee) + r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee) + } else { + r.sendAppend(leadTransferee) + } + } + return nil +} + +// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is +// whether they respond to MsgVoteResp or MsgPreVoteResp. +func stepCandidate(r *raft, m pb.Message) error { + // Only handle vote responses corresponding to our candidacy (while in + // StateCandidate, we may get stale MsgPreVoteResp messages in this term from + // our pre-candidate state). 
+ var myVoteRespType pb.MessageType + if r.state == StatePreCandidate { + myVoteRespType = pb.MsgPreVoteResp + } else { + myVoteRespType = pb.MsgVoteResp + } + switch m.Type { + case pb.MsgProp: + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return ErrProposalDropped + case pb.MsgApp: + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term + r.handleHeartbeat(m) + case pb.MsgSnap: + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term + r.handleSnapshot(m) + case myVoteRespType: + gr, rj, res := r.poll(m.From, m.Type, !m.Reject) + r.logger.Infof("%x has received %d %s votes and %d vote rejections", r.id, gr, m.Type, rj) + switch res { + case quorum.VoteWon: + if r.state == StatePreCandidate { + r.campaign(campaignElection) + } else { + r.becomeLeader() + r.bcastAppend() + } + case quorum.VoteLost: + // pb.MsgPreVoteResp contains future term of pre-candidate + // m.Term > r.Term; reuse r.Term + r.becomeFollower(r.Term, None) + } + case pb.MsgTimeoutNow: + r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From) + } + return nil +} + +func stepFollower(r *raft, m pb.Message) error { + switch m.Type { + case pb.MsgProp: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return ErrProposalDropped + } else if r.disableProposalForwarding { + r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term) + return ErrProposalDropped + } + m.To = r.lead + r.send(m) + case pb.MsgApp: + r.electionElapsed = 0 + r.lead = m.From + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.electionElapsed = 0 + r.lead = m.From + r.handleHeartbeat(m) + case pb.MsgSnap: + r.electionElapsed = 0 + r.lead = m.From + r.handleSnapshot(m) + case pb.MsgTransferLeader: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term) + return nil + } + m.To = r.lead + r.send(m) + case pb.MsgTimeoutNow: + if r.promotable() { + r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From) + // Leadership transfers never use pre-vote even if r.preVote is true; we + // know we are not recovering from a partition so there is no need for the + // extra round trip. 
+ r.campaign(campaignTransfer) + } else { + r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From) + } + case pb.MsgReadIndex: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term) + return nil + } + m.To = r.lead + r.send(m) + case pb.MsgReadIndexResp: + if len(m.Entries) != 1 { + r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries)) + return nil + } + r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data}) + } + return nil +} + +func (r *raft) handleAppendEntries(m pb.Message) { + if m.Index < r.raftLog.committed { + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + return + } + + if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) + } else { + r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x", + r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) + } +} + +func (r *raft) handleHeartbeat(m pb.Message) { + r.raftLog.commitTo(m.Commit) + r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context}) +} + +func (r *raft) handleSnapshot(m pb.Message) { + sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term + if r.restore(m.Snapshot) { + r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()}) + } else { + r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + } +} + +// restore recovers the state machine from a snapshot. It restores the log and the +// configuration of state machine. If this method returns false, the snapshot was +// ignored, either because it was obsolete or because of an error. +func (r *raft) restore(s pb.Snapshot) bool { + if s.Metadata.Index <= r.raftLog.committed { + return false + } + if r.state != StateFollower { + // This is defense-in-depth: if the leader somehow ended up applying a + // snapshot, it could move into a new term without moving into a + // follower state. This should never fire, but if it did, we'd have + // prevented damage by returning early, so log only a loud warning. + // + // At the time of writing, the instance is guaranteed to be in follower + // state when this method is called. + r.logger.Warningf("%x attempted to restore snapshot as leader; should never happen", r.id) + r.becomeFollower(r.Term+1, None) + return false + } + + // More defense-in-depth: throw away snapshot if recipient is not in the + // config. This shouuldn't ever happen (at the time of writing) but lots of + // code here and there assumes that r.id is in the progress tracker. 
+ found := false + cs := s.Metadata.ConfState + for _, set := range [][]uint64{ + cs.Voters, + cs.Learners, + } { + for _, id := range set { + if id == r.id { + found = true + break + } + } + } + if !found { + r.logger.Warningf( + "%x attempted to restore snapshot but it is not in the ConfState %v; should never happen", + r.id, cs, + ) + return false + } + + // Now go ahead and actually restore. + + if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + r.raftLog.commitTo(s.Metadata.Index) + return false + } + + r.raftLog.restore(s) + + // Reset the configuration and add the (potentially updated) peers in anew. + r.prs = tracker.MakeProgressTracker(r.prs.MaxInflight) + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), + }, cs) + + if err != nil { + // This should never happen. Either there's a bug in our config change + // handling or the client corrupted the conf change. + panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err)) + } + + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + + pr := r.prs.Progress[r.id] + pr.MaybeUpdate(pr.Next - 1) // TODO(tbg): this is untested and likely unneeded + + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + return true +} + +// promotable indicates whether state machine can be promoted to leader, +// which is true when its own id is in progress list. +func (r *raft) promotable() bool { + pr := r.prs.Progress[r.id] + return pr != nil && !pr.IsLearner +} + +func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { + cfg, prs, err := func() (tracker.Config, tracker.ProgressMap, error) { + changer := confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), + } + if cc.LeaveJoint() { + return changer.LeaveJoint() + } else if autoLeave, ok := cc.EnterJoint(); ok { + return changer.EnterJoint(autoLeave, cc.Changes...) + } + return changer.Simple(cc.Changes...) + }() + + if err != nil { + // TODO(tbg): return the error to the caller. + panic(err) + } + + return r.switchToConfig(cfg, prs) +} + +// switchToConfig reconfigures this node to use the provided configuration. It +// updates the in-memory state and, when necessary, carries out additional +// actions such as reacting to the removal of nodes or changed quorum +// requirements. +// +// The inputs usually result from restoring a ConfState or applying a ConfChange. +func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.ConfState { + r.prs.Config = cfg + r.prs.Progress = prs + + r.logger.Infof("%x switched to configuration %s", r.id, r.prs.Config) + cs := r.prs.ConfState() + pr, ok := r.prs.Progress[r.id] + + // Update whether the node itself is a learner, resetting to false when the + // node is removed. + r.isLearner = ok && pr.IsLearner + + if (!ok || r.isLearner) && r.state == StateLeader { + // This node is leader and was removed or demoted. We prevent demotions + // at the time writing but hypothetically we handle them the same way as + // removing the leader: stepping down into the next Term. 
+ // + // TODO(tbg): step down (for sanity) and ask follower with largest Match + // to TimeoutNow (to avoid interruption). This might still drop some + // proposals but it's better than nothing. + // + // TODO(tbg): test this branch. It is untested at the time of writing. + return cs + } + + // The remaining steps only make sense if this node is the leader and there + // are other nodes. + if r.state != StateLeader || len(cs.Voters) == 0 { + return cs + } + + if r.maybeCommit() { + // If the configuration change means that more entries are committed now, + // broadcast/append to everyone in the updated config. + r.bcastAppend() + } else { + // Otherwise, still probe the newly added replicas; there's no reason to + // let them wait out a heartbeat interval (or the next incoming + // proposal). + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + r.maybeSendAppend(id, false /* sendIfEmpty */) + }) + } + // If the the leadTransferee was removed, abort the leadership transfer. + if _, tOK := r.prs.Progress[r.leadTransferee]; !tOK && r.leadTransferee != 0 { + r.abortLeaderTransfer() + } + + return cs +} + +func (r *raft) loadState(state pb.HardState) { + if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() { + r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex()) + } + r.raftLog.committed = state.Commit + r.Term = state.Term + r.Vote = state.Vote +} + +// pastElectionTimeout returns true iff r.electionElapsed is greater +// than or equal to the randomized election timeout in +// [electiontimeout, 2 * electiontimeout - 1]. +func (r *raft) pastElectionTimeout() bool { + return r.electionElapsed >= r.randomizedElectionTimeout +} + +func (r *raft) resetRandomizedElectionTimeout() { + r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) +} + +func (r *raft) sendTimeoutNow(to uint64) { + r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) +} + +func (r *raft) abortLeaderTransfer() { + r.leadTransferee = None +} + +// increaseUncommittedSize computes the size of the proposed entries and +// determines whether they would push leader over its maxUncommittedSize limit. +// If the new entries would exceed the limit, the method returns false. If not, +// the increase in uncommitted entry size is recorded and the method returns +// true. +func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + + if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize { + // If the uncommitted tail of the Raft log is empty, allow any size + // proposal. Otherwise, limit the size of the uncommitted tail of the + // log and drop any proposal that would push the size over the limit. + return false + } + r.uncommittedSize += s + return true +} + +// reduceUncommittedSize accounts for the newly committed entries by decreasing +// the uncommitted entry size limit. +func (r *raft) reduceUncommittedSize(ents []pb.Entry) { + if r.uncommittedSize == 0 { + // Fast-path for followers, who do not track or enforce the limit. + return + } + + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + if s > r.uncommittedSize { + // uncommittedSize may underestimate the size of the uncommitted Raft + // log tail but will never overestimate it. Saturate at 0 instead of + // allowing overflow. 
+ r.uncommittedSize = 0 + } else { + r.uncommittedSize -= s + } +} + +func numOfPendingConf(ents []pb.Entry) int { + n := 0 + for i := range ents { + if ents[i].Type == pb.EntryConfChange { + n++ + } + } + return n +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go b/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go new file mode 100644 index 0000000000..46a7a70212 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go @@ -0,0 +1,170 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" +) + +// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow +// treating them in a unified manner. +type ConfChangeI interface { + AsV2() ConfChangeV2 + AsV1() (ConfChange, bool) +} + +// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2 +// and returns the result along with the corresponding EntryType. +func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) { + var typ EntryType + var ccdata []byte + var err error + if ccv1, ok := c.AsV1(); ok { + typ = EntryConfChange + ccdata, err = ccv1.Marshal() + } else { + ccv2 := c.AsV2() + typ = EntryConfChangeV2 + ccdata, err = ccv2.Marshal() + } + return typ, ccdata, err +} + +// AsV2 returns a V2 configuration change carrying out the same operation. +func (c ConfChange) AsV2() ConfChangeV2 { + return ConfChangeV2{ + Changes: []ConfChangeSingle{{ + Type: c.Type, + NodeID: c.NodeID, + }}, + Context: c.Context, + } +} + +// AsV1 returns the ConfChange and true. +func (c ConfChange) AsV1() (ConfChange, bool) { + return c, true +} + +// AsV2 is the identity. +func (c ConfChangeV2) AsV2() ConfChangeV2 { return c } + +// AsV1 returns ConfChange{} and false. +func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false } + +// EnterJoint returns two bools. The second bool is true if and only if this +// config change will use Joint Consensus, which is the case if it contains more +// than one change or if the use of Joint Consensus was requested explicitly. +// The first bool can only be true if second one is, and indicates whether the +// Joint State will be left automatically. +func (c *ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) { + // NB: in theory, more config changes could qualify for the "simple" + // protocol but it depends on the config on top of which the changes apply. + // For example, adding two learners is not OK if both nodes are part of the + // base config (i.e. two voters are turned into learners in the process of + // applying the conf change). In practice, these distinctions should not + // matter, so we keep it simple and use Joint Consensus liberally. + if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 { + // Use Joint Consensus. 
+ var autoLeave bool + switch c.Transition { + case ConfChangeTransitionAuto: + autoLeave = true + case ConfChangeTransitionJointImplicit: + autoLeave = true + case ConfChangeTransitionJointExplicit: + default: + panic(fmt.Sprintf("unknown transition: %+v", c)) + } + return autoLeave, true + } + return false, false +} + +// LeaveJoint is true if the configuration change leaves a joint configuration. +// This is the case if the ConfChangeV2 is zero, with the possible exception of +// the Context field. +func (c *ConfChangeV2) LeaveJoint() bool { + cpy := *c + cpy.Context = nil + return proto.Equal(&cpy, &ConfChangeV2{}) +} + +// ConfChangesFromString parses a Space-delimited sequence of operations into a +// slice of ConfChangeSingle. The supported operations are: +// - vn: make n a voter, +// - ln: make n a learner, +// - rn: remove n, and +// - un: update n. +func ConfChangesFromString(s string) ([]ConfChangeSingle, error) { + var ccs []ConfChangeSingle + toks := strings.Split(strings.TrimSpace(s), " ") + if toks[0] == "" { + toks = nil + } + for _, tok := range toks { + if len(tok) < 2 { + return nil, fmt.Errorf("unknown token %s", tok) + } + var cc ConfChangeSingle + switch tok[0] { + case 'v': + cc.Type = ConfChangeAddNode + case 'l': + cc.Type = ConfChangeAddLearnerNode + case 'r': + cc.Type = ConfChangeRemoveNode + case 'u': + cc.Type = ConfChangeUpdateNode + default: + return nil, fmt.Errorf("unknown input: %s", tok) + } + id, err := strconv.ParseUint(tok[1:], 10, 64) + if err != nil { + return nil, err + } + cc.NodeID = id + ccs = append(ccs, cc) + } + return ccs, nil +} + +// ConfChangesToString is the inverse to ConfChangesFromString. +func ConfChangesToString(ccs []ConfChangeSingle) string { + var buf strings.Builder + for i, cc := range ccs { + if i > 0 { + buf.WriteByte(' ') + } + switch cc.Type { + case ConfChangeAddNode: + buf.WriteByte('v') + case ConfChangeAddLearnerNode: + buf.WriteByte('l') + case ConfChangeRemoveNode: + buf.WriteByte('r') + case ConfChangeUpdateNode: + buf.WriteByte('u') + default: + buf.WriteString("unknown") + } + fmt.Fprintf(&buf, "%d", cc.NodeID) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go b/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go new file mode 100644 index 0000000000..4bda93214b --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go @@ -0,0 +1,45 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "reflect" + "sort" +) + +// Equivalent returns a nil error if the inputs describe the same configuration. +// On mismatch, returns a descriptive error showing the differences. +func (cs ConfState) Equivalent(cs2 ConfState) error { + cs1 := cs + orig1, orig2 := cs1, cs2 + s := func(sl *[]uint64) { + *sl = append([]uint64(nil), *sl...) 
+ sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] }) + } + + for _, cs := range []*ConfState{&cs1, &cs2} { + s(&cs.Voters) + s(&cs.Learners) + s(&cs.VotersOutgoing) + s(&cs.LearnersNext) + cs.XXX_unrecognized = nil + } + + if !reflect.DeepEqual(cs1, cs2) { + return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go new file mode 100644 index 0000000000..fcf259c89b --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go @@ -0,0 +1,2646 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft.proto + +/* + Package raftpb is a generated protocol buffer package. + + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange + ConfChangeSingle + ConfChangeV2 +*/ +package raftpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 + EntryConfChangeV2 EntryType = 2 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", + 2: "EntryConfChangeV2", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, + "EntryConfChangeV2": 2, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = 
map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +type ConfChangeTransition int32 + +const ( + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto ConfChangeTransition = 0 + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit ConfChangeTransition = 1 + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). 
+ ConfChangeTransitionJointExplicit ConfChangeTransition = 2 +) + +var ConfChangeTransition_name = map[int32]string{ + 0: "ConfChangeTransitionAuto", + 1: "ConfChangeTransitionJointImplicit", + 2: "ConfChangeTransitionJointExplicit", +} +var ConfChangeTransition_value = map[string]int32{ + "ConfChangeTransitionAuto": 0, + "ConfChangeTransitionJointImplicit": 1, + "ConfChangeTransitionJointExplicit": 2, +} + +func (x ConfChangeTransition) Enum() *ConfChangeTransition { + p := new(ConfChangeTransition) + *p = x + return p +} +func (x ConfChangeTransition) String() string { + return proto.EnumName(ConfChangeTransition_name, int32(x)) +} +func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition") + if err != nil { + return err + } + *x = ConfChangeTransition(value) + return nil +} +func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 + ConfChangeAddLearnerNode ConfChangeType = 3 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", + 3: "ConfChangeAddLearnerNode", +} +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, + "ConfChangeAddLearnerNode": 3, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return 
proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ConfState struct { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"` + // The learners in the incoming config. + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + // The voters in the outgoing config. + VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"` + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"` + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. 
+ AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ConfChange struct { + Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"` + Context []byte `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"` + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +type ConfChangeSingle struct { + Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} } +func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) } +func (*ConfChangeSingle) ProtoMessage() {} +func (*ConfChangeSingle) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. 
Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +type ConfChangeV2 struct { + Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"` + Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"` + Context []byte `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} } +func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) } +func (*ConfChangeV2) ProtoMessage() {} +func (*ConfChangeV2) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle") + proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Voters) > 0 { + for _, num := range m.Voters { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.Learners) > 0 { + for _, num := range m.Learners { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.VotersOutgoing) > 0 { + for _, num := range m.VotersOutgoing { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.LearnersNext) > 0 { + for _, num := range m.LearnersNext { + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + dAtA[i] = 
0x28 + i++ + if m.AutoLeave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Context != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + 
sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Voters) > 0 { + for _, e := range m.Voters { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.Learners) > 0 { + for _, e := range m.Learners { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.VotersOutgoing) > 0 { + for _, e := range m.VotersOutgoing { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.LearnersNext) > 0 { + for _, e := range m.LearnersNext { + n += 1 + sovRaft(uint64(e)) + } + } + n += 2 + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeSingle) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeV2) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType) + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoLeave = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChangeV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType) + } + m.Transition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Transition |= (ConfChangeTransition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, ConfChangeSingle{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xe3, 0x36, + 0x17, 0xb5, 0x64, 0xc5, 0x3f, 0xd7, 0x8e, 0xc3, 0xdc, 0xc9, 0x37, 0x20, 0x82, 0xc0, 0xe3, 0xcf, + 0xd3, 0x62, 0x8c, 0x14, 0x93, 0x16, 0x5e, 0x14, 0x45, 0x77, 0xf9, 0x19, 0x20, 0x29, 0xe2, 0x74, + 0xea, 0x64, 0xb2, 0x28, 0x50, 0x04, 0x8c, 0x45, 0x2b, 0x6a, 0x2d, 0x51, 0xa0, 0xe8, 0x34, 0xd9, + 0x14, 0x45, 0x9f, 0xa2, 0x9b, 0xd9, 0xf6, 0x01, 0xfa, 0x14, 0x59, 0x0e, 0xd0, 0xfd, 0xa0, 0x93, + 0xbe, 0x48, 0x41, 0x8a, 0xb2, 0x65, 0x27, 0x98, 0x45, 0x77, 0xe4, 0x39, 0x87, 0xf7, 0x9e, 0x7b, + 0x79, 0x45, 0x01, 0x48, 0x36, 0x56, 0x3b, 0x89, 0x14, 0x4a, 0x60, 0x45, 0xaf, 0x93, 0xcb, 0xcd, + 0x8d, 0x40, 0x04, 0xc2, 0x40, 0x9f, 0xeb, 0x55, 0xc6, 0x76, 0x7f, 0x81, 0x95, 0x57, 0xb1, 0x92, + 0xb7, 0xf8, 0x19, 0x78, 0x67, 0xb7, 0x09, 0xa7, 0x4e, 0xc7, 0xe9, 0xb5, 0xfa, 0xeb, 0x3b, 0xd9, + 0xa9, 0x1d, 0x43, 0x6a, 0x62, 0xcf, 0xbb, 0x7b, 0xff, 0xac, 0x34, 0x34, 0x22, 0xa4, 0xe0, 0x9d, + 0x71, 0x19, 0x51, 0xb7, 0xe3, 0xf4, 0xbc, 0x19, 0xc3, 0x65, 0x84, 0x9b, 0xb0, 0x72, 0x14, 0xfb, + 0xfc, 0x86, 0x96, 0x0b, 0x54, 0x06, 0x21, 0x82, 0x77, 0xc0, 0x14, 0xa3, 0x5e, 0xc7, 0xe9, 0x35, + 0x87, 0x66, 0xdd, 0xfd, 0xd5, 0x01, 0x72, 0x1a, 0xb3, 0x24, 0xbd, 0x12, 0x6a, 0xc0, 0x15, 0xf3, + 0x99, 0x62, 0xf8, 0x25, 0xc0, 0x48, 0xc4, 0xe3, 0x8b, 0x54, 0x31, 0x95, 0x39, 0x6a, 0xcc, 0x1d, + 0xed, 0x8b, 0x78, 
0x7c, 0xaa, 0x09, 0x1b, 0xbc, 0x3e, 0xca, 0x01, 0x9d, 0x3c, 0x34, 0xc9, 0x8b, + 0xbe, 0x32, 0x48, 0x5b, 0x56, 0xda, 0x72, 0xd1, 0x97, 0x41, 0xba, 0xdf, 0x43, 0x2d, 0x77, 0xa0, + 0x2d, 0x6a, 0x07, 0x26, 0x67, 0x73, 0x68, 0xd6, 0xf8, 0x35, 0xd4, 0x22, 0xeb, 0xcc, 0x04, 0x6e, + 0xf4, 0x69, 0xee, 0x65, 0xd9, 0xb9, 0x8d, 0x3b, 0xd3, 0x77, 0xdf, 0x96, 0xa1, 0x3a, 0xe0, 0x69, + 0xca, 0x02, 0x8e, 0x2f, 0xc1, 0x53, 0xf3, 0x0e, 0x3f, 0xc9, 0x63, 0x58, 0xba, 0xd8, 0x63, 0x2d, + 0xc3, 0x0d, 0x70, 0x95, 0x58, 0xa8, 0xc4, 0x55, 0x42, 0x97, 0x31, 0x96, 0x62, 0xa9, 0x0c, 0x8d, + 0xcc, 0x0a, 0xf4, 0x96, 0x0b, 0xc4, 0x36, 0x54, 0x27, 0x22, 0x30, 0x17, 0xb6, 0x52, 0x20, 0x73, + 0x70, 0xde, 0xb6, 0xca, 0xc3, 0xb6, 0xbd, 0x84, 0x2a, 0x8f, 0x95, 0x0c, 0x79, 0x4a, 0xab, 0x9d, + 0x72, 0xaf, 0xd1, 0x5f, 0x5d, 0x98, 0x8c, 0x3c, 0x94, 0xd5, 0xe0, 0x16, 0x54, 0x46, 0x22, 0x8a, + 0x42, 0x45, 0x6b, 0x85, 0x58, 0x16, 0xc3, 0x3e, 0xd4, 0x52, 0xdb, 0x31, 0x5a, 0x37, 0x9d, 0x24, + 0xcb, 0x9d, 0xcc, 0x3b, 0x98, 0xeb, 0x74, 0x44, 0xc9, 0x7f, 0xe4, 0x23, 0x45, 0xa1, 0xe3, 0xf4, + 0x6a, 0x79, 0xc4, 0x0c, 0xc3, 0x4f, 0x00, 0xb2, 0xd5, 0x61, 0x18, 0x2b, 0xda, 0x28, 0xe4, 0x2c, + 0xe0, 0x48, 0xa1, 0x3a, 0x12, 0xb1, 0xe2, 0x37, 0x8a, 0x36, 0xcd, 0xc5, 0xe6, 0xdb, 0xee, 0x0f, + 0x50, 0x3f, 0x64, 0xd2, 0xcf, 0xc6, 0x27, 0xef, 0xa0, 0xf3, 0xa0, 0x83, 0x14, 0xbc, 0x6b, 0xa1, + 0xf8, 0xe2, 0xbc, 0x6b, 0xa4, 0x50, 0x70, 0xf9, 0x61, 0xc1, 0xdd, 0x3f, 0x1d, 0xa8, 0xcf, 0xe6, + 0x15, 0x9f, 0x42, 0x45, 0x9f, 0x91, 0x29, 0x75, 0x3a, 0xe5, 0x9e, 0x37, 0xb4, 0x3b, 0xdc, 0x84, + 0xda, 0x84, 0x33, 0x19, 0x6b, 0xc6, 0x35, 0xcc, 0x6c, 0x8f, 0x2f, 0x60, 0x2d, 0x53, 0x5d, 0x88, + 0xa9, 0x0a, 0x44, 0x18, 0x07, 0xb4, 0x6c, 0x24, 0xad, 0x0c, 0xfe, 0xd6, 0xa2, 0xf8, 0x1c, 0x56, + 0xf3, 0x43, 0x17, 0xb1, 0xae, 0xd4, 0x33, 0xb2, 0x66, 0x0e, 0x9e, 0xf0, 0x1b, 0x85, 0xcf, 0x01, + 0xd8, 0x54, 0x89, 0x8b, 0x09, 0x67, 0xd7, 0xdc, 0x0c, 0x43, 0xde, 0xd0, 0xba, 0xc6, 0x8f, 0x35, + 0xdc, 0x7d, 0xeb, 0x00, 0x68, 0xd3, 0xfb, 0x57, 0x2c, 0x0e, 0xf4, 0x47, 0xe5, 0x86, 0xbe, 0xed, + 0x09, 0x68, 0xed, 0xfd, 0xfb, 0x67, 0xee, 0xd1, 0xc1, 0xd0, 0x0d, 0x7d, 0xfc, 0xc2, 0x8e, 0xb4, + 0x6b, 0x46, 0xfa, 0x69, 0xf1, 0x13, 0xcd, 0x4e, 0x3f, 0x98, 0xea, 0x17, 0x50, 0x8d, 0x85, 0xcf, + 0x2f, 0x42, 0xdf, 0x36, 0xac, 0x65, 0x43, 0x56, 0x4e, 0x84, 0xcf, 0x8f, 0x0e, 0x86, 0x15, 0x4d, + 0x1f, 0xf9, 0xc5, 0x3b, 0xf3, 0x16, 0xef, 0x2c, 0x02, 0x32, 0x4f, 0x70, 0x1a, 0xc6, 0xc1, 0x84, + 0xcf, 0x8c, 0x38, 0xff, 0xc5, 0x88, 0xfb, 0x31, 0x23, 0xdd, 0x3f, 0x1c, 0x68, 0xce, 0xe3, 0x9c, + 0xf7, 0x71, 0x0f, 0x40, 0x49, 0x16, 0xa7, 0xa1, 0x0a, 0x45, 0x6c, 0x33, 0x6e, 0x3d, 0x92, 0x71, + 0xa6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x82, 0xea, 0xc8, 0xa8, 0xb2, 0x1b, 0x2f, 0x3c, 0x29, + 0xcb, 0xa5, 0xe5, 0x5f, 0x98, 0x95, 0x17, 0xfb, 0x52, 0x5e, 0xe8, 0xcb, 0xf6, 0x21, 0xd4, 0x67, + 0xaf, 0x35, 0xae, 0x41, 0xc3, 0x6c, 0x4e, 0x84, 0x8c, 0xd8, 0x84, 0x94, 0xf0, 0x09, 0xac, 0x19, + 0x60, 0x1e, 0x9f, 0x38, 0xf8, 0x3f, 0x58, 0x5f, 0x02, 0xcf, 0xfb, 0xc4, 0xdd, 0xfe, 0xcb, 0x85, + 0x46, 0xe1, 0x59, 0x42, 0x80, 0xca, 0x20, 0x0d, 0x0e, 0xa7, 0x09, 0x29, 0x61, 0x03, 0xaa, 0x83, + 0x34, 0xd8, 0xe3, 0x4c, 0x11, 0xc7, 0x6e, 0x5e, 0x4b, 0x91, 0x10, 0xd7, 0xaa, 0x76, 0x93, 0x84, + 0x94, 0xb1, 0x05, 0x90, 0xad, 0x87, 0x3c, 0x4d, 0x88, 0x67, 0x85, 0xe7, 0x42, 0x71, 0xb2, 0xa2, + 0xbd, 0xd9, 0x8d, 0x61, 0x2b, 0x96, 0xd5, 0x4f, 0x00, 0xa9, 0x22, 0x81, 0xa6, 0x4e, 0xc6, 0x99, + 0x54, 0x97, 0x3a, 0x4b, 0x0d, 0x37, 0x80, 0x14, 0x11, 0x73, 0xa8, 0x8e, 0x08, 0xad, 0x41, 0x1a, + 0xbc, 0x89, 0x25, 0x67, 0xa3, 0x2b, 0x76, 
0x39, 0xe1, 0x04, 0x70, 0x1d, 0x56, 0x6d, 0x20, 0xfd, + 0xc5, 0x4d, 0x53, 0xd2, 0xb0, 0xb2, 0xfd, 0x2b, 0x3e, 0xfa, 0xe9, 0xbb, 0xa9, 0x90, 0xd3, 0x88, + 0x34, 0x75, 0xd9, 0x83, 0x34, 0x30, 0x17, 0x34, 0xe6, 0xf2, 0x98, 0x33, 0x9f, 0x4b, 0xb2, 0x6a, + 0x4f, 0x9f, 0x85, 0x11, 0x17, 0x53, 0x75, 0x22, 0x7e, 0x26, 0x2d, 0x6b, 0x66, 0xc8, 0x99, 0x6f, + 0x7e, 0x61, 0x64, 0xcd, 0x9a, 0x99, 0x21, 0xc6, 0x0c, 0xb1, 0xf5, 0xbe, 0x96, 0xdc, 0x94, 0xb8, + 0x6e, 0xb3, 0xda, 0xbd, 0xd1, 0xe0, 0xf6, 0x6f, 0x0e, 0x6c, 0x3c, 0x36, 0x1e, 0xb8, 0x05, 0xf4, + 0x31, 0x7c, 0x77, 0xaa, 0x04, 0x29, 0xe1, 0xa7, 0xf0, 0xff, 0xc7, 0xd8, 0x6f, 0x44, 0x18, 0xab, + 0xa3, 0x28, 0x99, 0x84, 0xa3, 0x50, 0x5f, 0xc5, 0xc7, 0x64, 0xaf, 0x6e, 0xac, 0xcc, 0xdd, 0xbe, + 0x85, 0xd6, 0xe2, 0x47, 0xa1, 0x9b, 0x31, 0x47, 0x76, 0x7d, 0x5f, 0x8f, 0x3f, 0x29, 0x21, 0x2d, + 0x9a, 0x1d, 0xf2, 0x48, 0x5c, 0x73, 0xc3, 0x38, 0x8b, 0xcc, 0x9b, 0xc4, 0x67, 0x2a, 0x63, 0xdc, + 0xc5, 0x42, 0x76, 0x7d, 0xff, 0x38, 0x7b, 0x7b, 0x0c, 0x5b, 0xde, 0xa3, 0x77, 0x1f, 0xda, 0xa5, + 0x77, 0x1f, 0xda, 0xa5, 0xbb, 0xfb, 0xb6, 0xf3, 0xee, 0xbe, 0xed, 0xfc, 0x7d, 0xdf, 0x76, 0x7e, + 0xff, 0xa7, 0x5d, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0x87, 0x11, 0x6d, 0xd6, 0xaf, 0x08, 0x00, + 0x00, +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto new file mode 100644 index 0000000000..23d62ec2fb --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto @@ -0,0 +1,177 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; // corresponds to pb.ConfChange + EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2 +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + optional uint64 commit = 8 [(gogoproto.nullable) = false]; + optional Snapshot snapshot = 9 
[(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +enum ConfChangeTransition { + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto = 0; + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit = 1; + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit = 2; +} + +message ConfState { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + repeated uint64 voters = 1; + // The learners in the incoming config. + repeated uint64 learners = 2; + // The voters in the outgoing config. + repeated uint64 voters_outgoing = 3; + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + repeated uint64 learners_next = 4; + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + optional bool auto_leave = 5 [(gogoproto.nullable) = false]; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; + ConfChangeAddLearnerNode = 3; +} + +message ConfChange { + optional ConfChangeType type = 2 [(gogoproto.nullable) = false]; + optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ]; + optional bytes context = 4; + + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ]; +} + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +message ConfChangeSingle { + optional ConfChangeType type = 1 [(gogoproto.nullable) = false]; + optional uint64 node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"]; +} + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. 
+// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +message ConfChangeV2 { + optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false]; + repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false]; + optional bytes context = 3; +} diff --git a/vendor/go.etcd.io/etcd/raft/rawnode.go b/vendor/go.etcd.io/etcd/raft/rawnode.go new file mode 100644 index 0000000000..90eb69493c --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/rawnode.go @@ -0,0 +1,239 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// ErrStepLocalMsg is returned when try to step a local raft message +var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") + +// ErrStepPeerNotFound is returned when try to step a response message +// but there is no peer found in raft.prs for that node. +var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") + +// RawNode is a thread-unsafe Node. +// The methods of this struct correspond to the methods of Node and are described +// more fully there. +type RawNode struct { + raft *raft + prevSoftSt *SoftState + prevHardSt pb.HardState +} + +// NewRawNode instantiates a RawNode from the given configuration. 
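// Illustrative sketch, not part of the vendored sources: the joint-consensus
// flow described in the ConfChangeV2 comments above, driven through
// RawNode.ProposeConfChange. It assumes a running *raft.RawNode and that the
// generated ConfChangeV2 type satisfies the pb.ConfChangeI interface that
// ProposeConfChange accepts.
package app

import (
	raft "go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

// replaceVoter swaps one voter for another through an explicit joint
// configuration, then proposes an empty ConfChangeV2 to leave it.
func replaceVoter(rn *raft.RawNode, oldID, newID uint64) error {
	cc := pb.ConfChangeV2{
		Transition: pb.ConfChangeTransitionJointExplicit,
		Changes: []pb.ConfChangeSingle{
			{Type: pb.ConfChangeAddNode, NodeID: newID},
			{Type: pb.ConfChangeRemoveNode, NodeID: oldID},
		},
	}
	if err := rn.ProposeConfChange(cc); err != nil {
		return err
	}
	// Once the joint configuration has been applied, an empty ConfChangeV2
	// moves the group back to the final (non-joint) configuration.
	return rn.ProposeConfChange(pb.ConfChangeV2{})
}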
+// +// See Bootstrap() for bootstrapping an initial state; this replaces the former +// 'peers' argument to this method (with identical behavior). However, It is +// recommended that instead of calling Bootstrap, applications bootstrap their +// state manually by setting up a Storage that has a first index > 1 and which +// stores the desired ConfState as its InitialState. +func NewRawNode(config *Config) (*RawNode, error) { + r := newRaft(config) + rn := &RawNode{ + raft: r, + } + rn.prevSoftSt = r.softState() + rn.prevHardSt = r.hardState() + return rn, nil +} + +// Tick advances the internal logical clock by a single tick. +func (rn *RawNode) Tick() { + rn.raft.tick() +} + +// TickQuiesced advances the internal logical clock by a single tick without +// performing any other state machine processing. It allows the caller to avoid +// periodic heartbeats and elections when all of the peers in a Raft group are +// known to be at the same state. Expected usage is to periodically invoke Tick +// or TickQuiesced depending on whether the group is "active" or "quiesced". +// +// WARNING: Be very careful about using this method as it subverts the Raft +// state machine. You should probably be using Tick instead. +func (rn *RawNode) TickQuiesced() { + rn.raft.electionElapsed++ +} + +// Campaign causes this RawNode to transition to candidate state. +func (rn *RawNode) Campaign() error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgHup, + }) +} + +// Propose proposes data be appended to the raft log. +func (rn *RawNode) Propose(data []byte) error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgProp, + From: rn.raft.id, + Entries: []pb.Entry{ + {Data: data}, + }}) +} + +// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for +// details. +func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error { + m, err := confChangeToMsg(cc) + if err != nil { + return err + } + return rn.raft.Step(m) +} + +// ApplyConfChange applies a config change to the local node. +func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { + cs := rn.raft.applyConfChange(cc.AsV2()) + return &cs +} + +// Step advances the state machine using the given message. +func (rn *RawNode) Step(m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + return ErrStepLocalMsg + } + if pr := rn.raft.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { + return rn.raft.Step(m) + } + return ErrStepPeerNotFound +} + +// Ready returns the outstanding work that the application needs to handle. This +// includes appending and applying entries or a snapshot, updating the HardState, +// and sending messages. The returned Ready() *must* be handled and subsequently +// passed back via Advance(). +func (rn *RawNode) Ready() Ready { + rd := rn.readyWithoutAccept() + rn.acceptReady(rd) + return rd +} + +// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there +// is no obligation that the Ready must be handled. +func (rn *RawNode) readyWithoutAccept() Ready { + return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) +} + +// acceptReady is called when the consumer of the RawNode has decided to go +// ahead and handle a Ready. Nothing must alter the state of the RawNode between +// this call and the prior call to Ready(). 
+func (rn *RawNode) acceptReady(rd Ready) { + if rd.SoftState != nil { + rn.prevSoftSt = rd.SoftState + } + if len(rd.ReadStates) != 0 { + rn.raft.readStates = nil + } + rn.raft.msgs = nil +} + +// HasReady called when RawNode user need to check if any Ready pending. +// Checking logic in this method should be consistent with Ready.containsUpdates(). +func (rn *RawNode) HasReady() bool { + r := rn.raft + if !r.softState().equal(rn.prevSoftSt) { + return true + } + if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { + return true + } + if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) { + return true + } + if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { + return true + } + if len(r.readStates) != 0 { + return true + } + return false +} + +// Advance notifies the RawNode that the application has applied and saved progress in the +// last Ready results. +func (rn *RawNode) Advance(rd Ready) { + if !IsEmptyHardState(rd.HardState) { + rn.prevHardSt = rd.HardState + } + rn.raft.advance(rd) +} + +// Status returns the current status of the given group. This allocates, see +// BasicStatus and WithProgress for allocation-friendlier choices. +func (rn *RawNode) Status() Status { + status := getStatus(rn.raft) + return status +} + +// BasicStatus returns a BasicStatus. Notably this does not contain the +// Progress map; see WithProgress for an allocation-free way to inspect it. +func (rn *RawNode) BasicStatus() BasicStatus { + return getBasicStatus(rn.raft) +} + +// ProgressType indicates the type of replica a Progress corresponds to. +type ProgressType byte + +const ( + // ProgressTypePeer accompanies a Progress for a regular peer replica. + ProgressTypePeer ProgressType = iota + // ProgressTypeLearner accompanies a Progress for a learner replica. + ProgressTypeLearner +) + +// WithProgress is a helper to introspect the Progress for this node and its +// peers. +func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) { + rn.raft.prs.Visit(func(id uint64, pr *tracker.Progress) { + typ := ProgressTypePeer + if pr.IsLearner { + typ = ProgressTypeLearner + } + p := *pr + p.Inflights = nil + visitor(id, typ, p) + }) +} + +// ReportUnreachable reports the given node is not reachable for the last send. +func (rn *RawNode) ReportUnreachable(id uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) +} + +// ReportSnapshot reports the status of the sent snapshot. +func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) +} + +// ReadIndex requests a read state. The read state will be set in ready. +// Read State has a read index. Once the application advances further than the read +// index, any linearizable read requests issued before the read request can be +// processed safely. The read state will have the same rctx attached. 
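// Illustrative sketch, not part of the vendored sources: a minimal loop that
// drives a RawNode as the Ready/Advance comments above require, assuming the
// application supplies a prepared raft.Config and itself persists state,
// sends messages, and applies committed entries (those steps appear only as
// comments here).
package app

import raft "go.etcd.io/etcd/raft"

func driveLoop(cfg *raft.Config) error {
	rn, err := raft.NewRawNode(cfg)
	if err != nil {
		return err
	}
	for {
		rn.Tick() // normally invoked from a timer at the application's tick interval
		if !rn.HasReady() {
			continue
		}
		rd := rn.Ready()
		// 1. Persist the Ready's entries and hard state to stable storage.
		// 2. Send its outgoing messages to the addressed peers.
		// 3. Apply its committed entries to the state machine.
		rn.Advance(rd) // acknowledge the Ready so the next one can be produced
	}
}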
+func (rn *RawNode) ReadIndex(rctx []byte) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} diff --git a/vendor/go.etcd.io/etcd/raft/read_only.go b/vendor/go.etcd.io/etcd/raft/read_only.go new file mode 100644 index 0000000000..6987f1bd7d --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/read_only.go @@ -0,0 +1,121 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "go.etcd.io/etcd/raft/raftpb" + +// ReadState provides state for read only query. +// It's caller's responsibility to call ReadIndex first before getting +// this state from ready, it's also caller's duty to differentiate if this +// state is what it requests through RequestCtx, eg. given a unique id as +// RequestCtx +type ReadState struct { + Index uint64 + RequestCtx []byte +} + +type readIndexStatus struct { + req pb.Message + index uint64 + // NB: this never records 'false', but it's more convenient to use this + // instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If + // this becomes performance sensitive enough (doubtful), quorum.VoteResult + // can change to an API that is closer to that of CommittedIndex. + acks map[uint64]bool +} + +type readOnly struct { + option ReadOnlyOption + pendingReadIndex map[string]*readIndexStatus + readIndexQueue []string +} + +func newReadOnly(option ReadOnlyOption) *readOnly { + return &readOnly{ + option: option, + pendingReadIndex: make(map[string]*readIndexStatus), + } +} + +// addRequest adds a read only reuqest into readonly struct. +// `index` is the commit index of the raft state machine when it received +// the read only request. +// `m` is the original read only request message from the local or remote node. +func (ro *readOnly) addRequest(index uint64, m pb.Message) { + s := string(m.Entries[0].Data) + if _, ok := ro.pendingReadIndex[s]; ok { + return + } + ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)} + ro.readIndexQueue = append(ro.readIndexQueue, s) +} + +// recvAck notifies the readonly struct that the raft state machine received +// an acknowledgment of the heartbeat that attached with the read only request +// context. +func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool { + rs, ok := ro.pendingReadIndex[string(context)] + if !ok { + return nil + } + + rs.acks[id] = true + return rs.acks +} + +// advance advances the read only request queue kept by the readonly struct. +// It dequeues the requests until it finds the read only request that has +// the same context as the given `m`. 
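// Illustrative sketch, not part of the vendored sources: the application side
// of the ReadIndex flow described above. A read is tagged with a unique
// context via RawNode.ReadIndex; once the matching ReadState appears in a
// Ready, the read may be served as soon as the applied index reaches rs.Index.
package app

import (
	"bytes"

	raft "go.etcd.io/etcd/raft"
)

// canServeRead scans the ReadStates delivered in a Ready for the pending
// request context and reports whether that read is now linearizable.
func canServeRead(rd raft.Ready, pendingCtx []byte, appliedIndex uint64) bool {
	for _, rs := range rd.ReadStates {
		if bytes.Equal(rs.RequestCtx, pendingCtx) && appliedIndex >= rs.Index {
			return true
		}
	}
	return false
}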
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus { + var ( + i int + found bool + ) + + ctx := string(m.Context) + rss := []*readIndexStatus{} + + for _, okctx := range ro.readIndexQueue { + i++ + rs, ok := ro.pendingReadIndex[okctx] + if !ok { + panic("cannot find corresponding read state from pending map") + } + rss = append(rss, rs) + if okctx == ctx { + found = true + break + } + } + + if found { + ro.readIndexQueue = ro.readIndexQueue[i:] + for _, rs := range rss { + delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data)) + } + return rss + } + + return nil +} + +// lastPendingRequestCtx returns the context of the last pending read only +// request in readonly struct. +func (ro *readOnly) lastPendingRequestCtx() string { + if len(ro.readIndexQueue) == 0 { + return "" + } + return ro.readIndexQueue[len(ro.readIndexQueue)-1] +} diff --git a/vendor/go.etcd.io/etcd/raft/status.go b/vendor/go.etcd.io/etcd/raft/status.go new file mode 100644 index 0000000000..adc60486d9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/status.go @@ -0,0 +1,106 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// Status contains information about this Raft peer and its view of the system. +// The Progress is only populated on the leader. +type Status struct { + BasicStatus + Config tracker.Config + Progress map[uint64]tracker.Progress +} + +// BasicStatus contains basic information about the Raft peer. It does not allocate. +type BasicStatus struct { + ID uint64 + + pb.HardState + SoftState + + Applied uint64 + + LeadTransferee uint64 +} + +func getProgressCopy(r *raft) map[uint64]tracker.Progress { + m := make(map[uint64]tracker.Progress) + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + var p tracker.Progress + p = *pr + p.Inflights = pr.Inflights.Clone() + pr = nil + + m[id] = p + }) + return m +} + +func getBasicStatus(r *raft) BasicStatus { + s := BasicStatus{ + ID: r.id, + LeadTransferee: r.leadTransferee, + } + s.HardState = r.hardState() + s.SoftState = *r.softState() + s.Applied = r.raftLog.applied + return s +} + +// getStatus gets a copy of the current raft status. +func getStatus(r *raft) Status { + var s Status + s.BasicStatus = getBasicStatus(r) + if s.RaftState == StateLeader { + s.Progress = getProgressCopy(r) + } + s.Config = r.prs.Config.Clone() + return s +} + +// MarshalJSON translates the raft status into JSON. 
+// TODO: try to simplify this by introducing ID type into raft +func (s Status) MarshalJSON() ([]byte, error) { + j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`, + s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied) + + if len(s.Progress) == 0 { + j += "}," + } else { + for k, v := range s.Progress { + subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) + j += subj + } + // remove the trailing "," + j = j[:len(j)-1] + "}," + } + + j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee) + return []byte(j), nil +} + +func (s Status) String() string { + b, err := s.MarshalJSON() + if err != nil { + raftLogger.Panicf("unexpected error: %v", err) + } + return string(b) +} diff --git a/vendor/go.etcd.io/etcd/raft/storage.go b/vendor/go.etcd.io/etcd/raft/storage.go new file mode 100644 index 0000000000..6be574590e --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/storage.go @@ -0,0 +1,273 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + "sync" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +// ErrCompacted is returned by Storage.Entries/Compact when a requested +// index is unavailable because it predates the last snapshot. +var ErrCompacted = errors.New("requested index is unavailable due to compaction") + +// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested +// index is older than the existing snapshot. +var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") + +// ErrUnavailable is returned by Storage interface when the requested log entries +// are unavailable. +var ErrUnavailable = errors.New("requested entry at index is unavailable") + +// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required +// snapshot is temporarily unavailable. +var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") + +// Storage is an interface that may be implemented by the application +// to retrieve log entries from storage. +// +// If any Storage method returns an error, the raft instance will +// become inoperable and refuse to participate in elections; the +// application is responsible for cleanup and recovery in this case. +type Storage interface { + // TODO(tbg): split this into two interfaces, LogStorage and StateStorage. + + // InitialState returns the saved HardState and ConfState information. + InitialState() (pb.HardState, pb.ConfState, error) + // Entries returns a slice of log entries in the range [lo,hi). + // MaxSize limits the total size of the log entries returned, but + // Entries returns at least one entry if any. + Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + // Term returns the term of entry i, which must be in the range + // [FirstIndex()-1, LastIndex()]. 
The term of the entry before + // FirstIndex is retained for matching purposes even though the + // rest of that entry may not be available. + Term(i uint64) (uint64, error) + // LastIndex returns the index of the last entry in the log. + LastIndex() (uint64, error) + // FirstIndex returns the index of the first log entry that is + // possibly available via Entries (older entries have been incorporated + // into the latest Snapshot; if storage only contains the dummy entry the + // first log entry is not available). + FirstIndex() (uint64, error) + // Snapshot returns the most recent snapshot. + // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, + // so raft state machine could know that Storage needs some time to prepare + // snapshot and call Snapshot later. + Snapshot() (pb.Snapshot, error) +} + +// MemoryStorage implements the Storage interface backed by an +// in-memory array. +type MemoryStorage struct { + // Protects access to all fields. Most methods of MemoryStorage are + // run on the raft goroutine, but Append() is run on an application + // goroutine. + sync.Mutex + + hardState pb.HardState + snapshot pb.Snapshot + // ents[i] has raft log position i+snapshot.Metadata.Index + ents []pb.Entry +} + +// NewMemoryStorage creates an empty MemoryStorage. +func NewMemoryStorage() *MemoryStorage { + return &MemoryStorage{ + // When starting from scratch populate the list with a dummy entry at term zero. + ents: make([]pb.Entry, 1), + } +} + +// InitialState implements the Storage interface. +func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + return ms.hardState, ms.snapshot.Metadata.ConfState, nil +} + +// SetHardState saves the current HardState. +func (ms *MemoryStorage) SetHardState(st pb.HardState) error { + ms.Lock() + defer ms.Unlock() + ms.hardState = st + return nil +} + +// Entries implements the Storage interface. +func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if lo <= offset { + return nil, ErrCompacted + } + if hi > ms.lastIndex()+1 { + raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) + } + // only contains dummy entries. + if len(ms.ents) == 1 { + return nil, ErrUnavailable + } + + ents := ms.ents[lo-offset : hi-offset] + return limitSize(ents, maxSize), nil +} + +// Term implements the Storage interface. +func (ms *MemoryStorage) Term(i uint64) (uint64, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if i < offset { + return 0, ErrCompacted + } + if int(i-offset) >= len(ms.ents) { + return 0, ErrUnavailable + } + return ms.ents[i-offset].Term, nil +} + +// LastIndex implements the Storage interface. +func (ms *MemoryStorage) LastIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.lastIndex(), nil +} + +func (ms *MemoryStorage) lastIndex() uint64 { + return ms.ents[0].Index + uint64(len(ms.ents)) - 1 +} + +// FirstIndex implements the Storage interface. +func (ms *MemoryStorage) FirstIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.firstIndex(), nil +} + +func (ms *MemoryStorage) firstIndex() uint64 { + return ms.ents[0].Index + 1 +} + +// Snapshot implements the Storage interface. +func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + return ms.snapshot, nil +} + +// ApplySnapshot overwrites the contents of this Storage object with +// those of the given snapshot. 
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { + ms.Lock() + defer ms.Unlock() + + //handle check for old snapshot being applied + msIndex := ms.snapshot.Metadata.Index + snapIndex := snap.Metadata.Index + if msIndex >= snapIndex { + return ErrSnapOutOfDate + } + + ms.snapshot = snap + ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} + return nil +} + +// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and +// can be used to reconstruct the state at that point. +// If any configuration changes have been made since the last compaction, +// the result of the last ApplyConfChange must be passed in. +func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + if i <= ms.snapshot.Metadata.Index { + return pb.Snapshot{}, ErrSnapOutOfDate + } + + offset := ms.ents[0].Index + if i > ms.lastIndex() { + raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) + } + + ms.snapshot.Metadata.Index = i + ms.snapshot.Metadata.Term = ms.ents[i-offset].Term + if cs != nil { + ms.snapshot.Metadata.ConfState = *cs + } + ms.snapshot.Data = data + return ms.snapshot, nil +} + +// Compact discards all log entries prior to compactIndex. +// It is the application's responsibility to not attempt to compact an index +// greater than raftLog.applied. +func (ms *MemoryStorage) Compact(compactIndex uint64) error { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if compactIndex <= offset { + return ErrCompacted + } + if compactIndex > ms.lastIndex() { + raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) + } + + i := compactIndex - offset + ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + ents[0].Index = ms.ents[i].Index + ents[0].Term = ms.ents[i].Term + ents = append(ents, ms.ents[i+1:]...) + ms.ents = ents + return nil +} + +// Append the new entries to storage. +// TODO (xiangli): ensure the entries are continuous and +// entries[0].Index > ms.entries[0].Index +func (ms *MemoryStorage) Append(entries []pb.Entry) error { + if len(entries) == 0 { + return nil + } + + ms.Lock() + defer ms.Unlock() + + first := ms.firstIndex() + last := entries[0].Index + uint64(len(entries)) - 1 + + // shortcut if there is no new entry. + if last < first { + return nil + } + // truncate compacted entries + if first > entries[0].Index { + entries = entries[first-entries[0].Index:] + } + + offset := entries[0].Index - ms.ents[0].Index + switch { + case uint64(len(ms.ents)) > offset: + ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) + ms.ents = append(ms.ents, entries...) + case uint64(len(ms.ents)) == offset: + ms.ents = append(ms.ents, entries...) + default: + raftLogger.Panicf("missing log entry [last: %d, append at: %d]", + ms.lastIndex(), entries[0].Index) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/inflights.go b/vendor/go.etcd.io/etcd/raft/tracker/inflights.go new file mode 100644 index 0000000000..1a056341ab --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/inflights.go @@ -0,0 +1,132 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
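// Illustrative sketch, not part of the vendored sources: seeding a
// MemoryStorage on restart from previously persisted state, as the Storage
// contract above expects. The snapshot, hard state, and entries are assumed
// to come from the application's own persistence layer.
package app

import (
	raft "go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func restoreStorage(snap pb.Snapshot, hs pb.HardState, ents []pb.Entry) (*raft.MemoryStorage, error) {
	storage := raft.NewMemoryStorage()
	// The snapshot establishes the first index, the hard state restores
	// term/vote/commit, and the surviving log entries are appended on top.
	if err := storage.ApplySnapshot(snap); err != nil {
		return nil, err
	}
	if err := storage.SetHardState(hs); err != nil {
		return nil, err
	}
	if err := storage.Append(ents); err != nil {
		return nil, err
	}
	return storage, nil
}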
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// Inflights limits the number of MsgApp (represented by the largest index +// contained within) sent to followers but not yet acknowledged by them. Callers +// use Full() to check whether more messages can be sent, call Add() whenever +// they are sending a new append, and release "quota" via FreeLE() whenever an +// ack is received. +type Inflights struct { + // the starting index in the buffer + start int + // number of inflights in the buffer + count int + + // the size of the buffer + size int + + // buffer contains the index of the last entry + // inside one message. + buffer []uint64 +} + +// NewInflights sets up an Inflights that allows up to 'size' inflight messages. +func NewInflights(size int) *Inflights { + return &Inflights{ + size: size, + } +} + +// Clone returns an *Inflights that is identical to but shares no memory with +// the receiver. +func (in *Inflights) Clone() *Inflights { + ins := *in + ins.buffer = append([]uint64(nil), in.buffer...) + return &ins +} + +// Add notifies the Inflights that a new message with the given index is being +// dispatched. Full() must be called prior to Add() to verify that there is room +// for one more message, and consecutive calls to add Add() must provide a +// monotonic sequence of indexes. +func (in *Inflights) Add(inflight uint64) { + if in.Full() { + panic("cannot add into a Full inflights") + } + next := in.start + in.count + size := in.size + if next >= size { + next -= size + } + if next >= len(in.buffer) { + in.grow() + } + in.buffer[next] = inflight + in.count++ +} + +// grow the inflight buffer by doubling up to inflights.size. We grow on demand +// instead of preallocating to inflights.size to handle systems which have +// thousands of Raft groups per process. +func (in *Inflights) grow() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// FreeLE frees the inflights smaller or equal to the given `to` flight. +func (in *Inflights) FreeLE(to uint64) { + if in.count == 0 || to < in.buffer[in.start] { + // out of the left side of the window + return + } + + idx := in.start + var i int + for i = 0; i < in.count; i++ { + if to < in.buffer[idx] { // found the first large inflight + break + } + + // increase index and maybe rotate + size := in.size + if idx++; idx >= size { + idx -= size + } + } + // free i inflights and set new start index + in.count -= i + in.start = idx + if in.count == 0 { + // inflights is empty, reset the start index so that we don't grow the + // buffer unnecessarily. + in.start = 0 + } +} + +// FreeFirstOne releases the first inflight. This is a no-op if nothing is +// inflight. +func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) } + +// Full returns true if no more messages can be sent at the moment. +func (in *Inflights) Full() bool { + return in.count == in.size +} + +// Count returns the number of inflight messages. 
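// Illustrative sketch, not part of the vendored sources: how leader-side code
// uses the Inflights window documented above. Add records the last index of
// each in-flight append, Full gates further sends, and FreeLE releases quota
// once the follower acknowledges up to a given index.
package app

import "go.etcd.io/etcd/raft/tracker"

func inflightsDemo() int {
	in := tracker.NewInflights(3) // at most 3 outstanding append messages
	for _, lastIndex := range []uint64{5, 7, 9} {
		if in.Full() {
			break // pause replication until an acknowledgment frees quota
		}
		in.Add(lastIndex) // indexes must be added in increasing order
	}
	in.FreeLE(7)      // the follower acked everything up to index 7
	return in.Count() // 1: only the append ending at index 9 remains in flight
}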
+func (in *Inflights) Count() int { return in.count } + +// reset frees all inflights. +func (in *Inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/progress.go b/vendor/go.etcd.io/etcd/raft/tracker/progress.go new file mode 100644 index 0000000000..62c81f45af --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/progress.go @@ -0,0 +1,259 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" +) + +// Progress represents a follower’s progress in the view of the leader. Leader +// maintains progresses of all followers, and sends entries to the follower +// based on its progress. +// +// NB(tbg): Progress is basically a state machine whose transitions are mostly +// strewn around `*raft.raft`. Additionally, some fields are only used when in a +// certain State. All of this isn't ideal. +type Progress struct { + Match, Next uint64 + // State defines how the leader should interact with the follower. + // + // When in StateProbe, leader sends at most one replication message + // per heartbeat interval. It also probes actual progress of the follower. + // + // When in StateReplicate, leader optimistically increases next + // to the latest entry sent after sending replication message. This is + // an optimized state for fast replicating log entries to the follower. + // + // When in StateSnapshot, leader should have sent out snapshot + // before and stops sending any replication message. + State StateType + + // PendingSnapshot is used in StateSnapshot. + // If there is a pending snapshot, the pendingSnapshot will be set to the + // index of the snapshot. If pendingSnapshot is set, the replication process of + // this Progress will be paused. raft will not resend snapshot until the pending one + // is reported to be failed. + PendingSnapshot uint64 + + // RecentActive is true if the progress is recently active. Receiving any messages + // from the corresponding follower indicates the progress is active. + // RecentActive can be reset to false after an election timeout. + // + // TODO(tbg): the leader should always have this set to true. + RecentActive bool + + // ProbeSent is used while this follower is in StateProbe. When ProbeSent is + // true, raft should pause sending replication message to this peer until + // ProbeSent is reset. See ProbeAcked() and IsPaused(). + ProbeSent bool + + // Inflights is a sliding window for the inflight messages. + // Each inflight message contains one or more log entries. + // The max number of entries per message is defined in raft config as MaxSizePerMsg. + // Thus inflight effectively limits both the number of inflight messages + // and the bandwidth each Progress can use. + // When inflights is Full, no more message should be sent. + // When a leader sends out a message, the index of the last + // entry should be added to inflights. The index MUST be added + // into inflights in order. 
+ // When a leader receives a reply, the previous inflights should + // be freed by calling inflights.FreeLE with the index of the last + // received entry. + Inflights *Inflights + + // IsLearner is true if this progress is tracked for a learner. + IsLearner bool +} + +// ResetState moves the Progress into the specified State, resetting ProbeSent, +// PendingSnapshot, and Inflights. +func (pr *Progress) ResetState(state StateType) { + pr.ProbeSent = false + pr.PendingSnapshot = 0 + pr.State = state + pr.Inflights.reset() +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +// ProbeAcked is called when this peer has accepted an append. It resets +// ProbeSent to signal that additional append messages should be sent without +// further delay. +func (pr *Progress) ProbeAcked() { + pr.ProbeSent = false +} + +// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or, +// optionally and if larger, the index of the pending snapshot. +func (pr *Progress) BecomeProbe() { + // If the original state is StateSnapshot, progress knows that + // the pending snapshot has been sent to this peer successfully, then + // probes from pendingSnapshot + 1. + if pr.State == StateSnapshot { + pendingSnapshot := pr.PendingSnapshot + pr.ResetState(StateProbe) + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { + pr.ResetState(StateProbe) + pr.Next = pr.Match + 1 + } +} + +// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1. +func (pr *Progress) BecomeReplicate() { + pr.ResetState(StateReplicate) + pr.Next = pr.Match + 1 +} + +// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending +// snapshot index. +func (pr *Progress) BecomeSnapshot(snapshoti uint64) { + pr.ResetState(StateSnapshot) + pr.PendingSnapshot = snapshoti +} + +// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the +// index acked by it. The method returns false if the given n index comes from +// an outdated message. Otherwise it updates the progress and returns true. +func (pr *Progress) MaybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n + updated = true + pr.ProbeAcked() + } + if pr.Next < n+1 { + pr.Next = n + 1 + } + return updated +} + +// OptimisticUpdate signals that appends all the way up to and including index n +// are in-flight. As a result, Next is increased to n+1. +func (pr *Progress) OptimisticUpdate(n uint64) { pr.Next = n + 1 } + +// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The +// arguments are the index the follower rejected to append to its log, and its +// last index. +// +// Rejections can happen spuriously as messages are sent out of order or +// duplicated. In such cases, the rejection pertains to an index that the +// Progress already knows were previously acknowledged, and false is returned +// without changing the Progress. +// +// If the rejection is genuine, Next is lowered sensibly, and the Progress is +// cleared for sending log entries. +func (pr *Progress) MaybeDecrTo(rejected, last uint64) bool { + if pr.State == StateReplicate { + // The rejection must be stale if the progress has matched and "rejected" + // is smaller than "match". + if rejected <= pr.Match { + return false + } + // Directly decrease next to match + 1. + // + // TODO(tbg): why not use last if it's larger? 
+ pr.Next = pr.Match + 1 + return true + } + + // The rejection must be stale if "rejected" does not match next - 1. This + // is because non-replicating followers are probed one entry at a time. + if pr.Next-1 != rejected { + return false + } + + if pr.Next = min(rejected, last+1); pr.Next < 1 { + pr.Next = 1 + } + pr.ProbeSent = false + return true +} + +// IsPaused returns whether sending log entries to this node has been throttled. +// This is done when a node has rejected recent MsgApps, is currently waiting +// for a snapshot, or has reached the MaxInflightMsgs limit. In normal +// operation, this is false. A throttled node will be contacted less frequently +// until it has reached a state in which it's able to accept a steady stream of +// log entries again. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case StateProbe: + return pr.ProbeSent + case StateReplicate: + return pr.Inflights.Full() + case StateSnapshot: + return true + default: + panic("unexpected state") + } +} + +func (pr *Progress) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next) + if pr.IsLearner { + fmt.Fprint(&buf, " learner") + } + if pr.IsPaused() { + fmt.Fprint(&buf, " paused") + } + if pr.PendingSnapshot > 0 { + fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot) + } + if !pr.RecentActive { + fmt.Fprintf(&buf, " inactive") + } + if n := pr.Inflights.Count(); n > 0 { + fmt.Fprintf(&buf, " inflight=%d", n) + if pr.Inflights.Full() { + fmt.Fprint(&buf, "[full]") + } + } + return buf.String() +} + +// ProgressMap is a map of *Progress. +type ProgressMap map[uint64]*Progress + +// String prints the ProgressMap in sorted key order, one Progress per line. +func (m ProgressMap) String() string { + ids := make([]uint64, 0, len(m)) + for k := range m { + ids = append(ids, k) + } + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + var buf strings.Builder + for _, id := range ids { + fmt.Fprintf(&buf, "%d: %s\n", id, m[id]) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/state.go b/vendor/go.etcd.io/etcd/raft/tracker/state.go new file mode 100644 index 0000000000..285b4b8f58 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/state.go @@ -0,0 +1,42 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// StateType is the state of a tracked follower. +type StateType uint64 + +const ( + // StateProbe indicates a follower whose last index isn't known. Such a + // follower is "probed" (i.e. an append sent periodically) to narrow down + // its last index. In the ideal (and common) case, only one round of probing + // is necessary as the follower will react with a hint. Followers that are + // probed over extended periods of time are often offline. + StateProbe StateType = iota + // StateReplicate is the state steady in which a follower eagerly receives + // log entries to append to its log. 
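// Illustrative sketch, not part of the vendored sources: the probe/replicate
// transitions described in the Progress comments above, using only the
// exported methods; the concrete indexes are placeholders.
package app

import "go.etcd.io/etcd/raft/tracker"

func progressDemo() {
	pr := &tracker.Progress{Match: 0, Next: 1, Inflights: tracker.NewInflights(256)}
	pr.BecomeProbe()        // probe one entry at a time until the follower responds
	if pr.MaybeUpdate(10) { // the follower acknowledged entries up to index 10
		pr.BecomeReplicate() // stream optimistically from Next = 11
	}
	pr.OptimisticUpdate(20)    // appends up to index 20 are now in flight
	_ = pr.MaybeDecrTo(15, 12) // a rejection in StateReplicate falls back to Next = Match+1
	_ = pr.IsPaused()          // false here: the inflights window is far from full
}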
+ StateReplicate + // StateSnapshot indicates a follower that needs log entries not available + // from the leader's Raft log. Such a follower needs a full snapshot to + // return to StateReplicate. + StateSnapshot +) + +var prstmap = [...]string{ + "StateProbe", + "StateReplicate", + "StateSnapshot", +} + +func (st StateType) String() string { return prstmap[uint64(st)] } diff --git a/vendor/go.etcd.io/etcd/raft/tracker/tracker.go b/vendor/go.etcd.io/etcd/raft/tracker/tracker.go new file mode 100644 index 0000000000..a4581143d1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/tracker.go @@ -0,0 +1,288 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Config reflects the configuration tracked in a ProgressTracker. +type Config struct { + Voters quorum.JointConfig + // AutoLeave is true if the configuration is joint and a transition to the + // incoming configuration should be carried out automatically by Raft when + // this is possible. If false, the configuration will be joint until the + // application initiates the transition manually. + AutoLeave bool + // Learners is a set of IDs corresponding to the learners active in the + // current configuration. + // + // Invariant: Learners and Voters does not intersect, i.e. if a peer is in + // either half of the joint config, it can't be a learner; if it is a + // learner it can't be in either half of the joint config. This invariant + // simplifies the implementation since it allows peers to have clarity about + // its current role without taking into account joint consensus. + Learners map[uint64]struct{} + // When we turn a voter into a learner during a joint consensus transition, + // we cannot add the learner directly when entering the joint state. This is + // because this would violate the invariant that the intersection of + // voters and learners is empty. For example, assume a Voter is removed and + // immediately re-added as a learner (or in other words, it is demoted): + // + // Initially, the configuration will be + // + // voters: {1 2 3} + // learners: {} + // + // and we want to demote 3. Entering the joint configuration, we naively get + // + // voters: {1 2} & {1 2 3} + // learners: {3} + // + // but this violates the invariant (3 is both voter and learner). Instead, + // we get + // + // voters: {1 2} & {1 2 3} + // learners: {} + // next_learners: {3} + // + // Where 3 is now still purely a voter, but we are remembering the intention + // to make it a learner upon transitioning into the final configuration: + // + // voters: {1 2} + // learners: {3} + // next_learners: {} + // + // Note that next_learners is not used while adding a learner that is not + // also a voter in the joint config. In this case, the learner is added + // right away when entering the joint configuration, so that it is caught up + // as soon as possible. 
+ LearnersNext map[uint64]struct{} +} + +func (c Config) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "voters=%s", c.Voters) + if c.Learners != nil { + fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String()) + } + if c.LearnersNext != nil { + fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String()) + } + if c.AutoLeave { + fmt.Fprintf(&buf, " autoleave") + } + return buf.String() +} + +// Clone returns a copy of the Config that shares no memory with the original. +func (c *Config) Clone() Config { + clone := func(m map[uint64]struct{}) map[uint64]struct{} { + if m == nil { + return nil + } + mm := make(map[uint64]struct{}, len(m)) + for k := range m { + mm[k] = struct{}{} + } + return mm + } + return Config{ + Voters: quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])}, + Learners: clone(c.Learners), + LearnersNext: clone(c.LearnersNext), + } +} + +// ProgressTracker tracks the currently active configuration and the information +// known about the nodes and learners in it. In particular, it tracks the match +// index for each peer which in turn allows reasoning about the committed index. +type ProgressTracker struct { + Config + + Progress ProgressMap + + Votes map[uint64]bool + + MaxInflight int +} + +// MakeProgressTracker initializes a ProgressTracker. +func MakeProgressTracker(maxInflight int) ProgressTracker { + p := ProgressTracker{ + MaxInflight: maxInflight, + Config: Config{ + Voters: quorum.JointConfig{ + quorum.MajorityConfig{}, + nil, // only populated when used + }, + Learners: nil, // only populated when used + LearnersNext: nil, // only populated when used + }, + Votes: map[uint64]bool{}, + Progress: map[uint64]*Progress{}, + } + return p +} + +// ConfState returns a ConfState representing the active configuration. +func (p *ProgressTracker) ConfState() pb.ConfState { + return pb.ConfState{ + Voters: p.Voters[0].Slice(), + VotersOutgoing: p.Voters[1].Slice(), + Learners: quorum.MajorityConfig(p.Learners).Slice(), + LearnersNext: quorum.MajorityConfig(p.LearnersNext).Slice(), + AutoLeave: p.AutoLeave, + } +} + +// IsSingleton returns true if (and only if) there is only one voting member +// (i.e. the leader) in the current configuration. +func (p *ProgressTracker) IsSingleton() bool { + return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0 +} + +type matchAckIndexer map[uint64]*Progress + +var _ quorum.AckedIndexer = matchAckIndexer(nil) + +// AckedIndex implements IndexLookuper. +func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) { + pr, ok := l[id] + if !ok { + return 0, false + } + return quorum.Index(pr.Match), true +} + +// Committed returns the largest log index known to be committed based on what +// the voting members of the group have acknowledged. +func (p *ProgressTracker) Committed() uint64 { + return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress))) +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// Visit invokes the supplied closure for all tracked progresses in stable order. +func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) { + n := len(p.Progress) + // We need to sort the IDs and don't want to allocate since this is hot code. + // The optimization here mirrors that in `(MajorityConfig).CommittedIndex`, + // see there for details. 
+ var sl [7]uint64 + ids := sl[:] + if len(sl) >= n { + ids = sl[:n] + } else { + ids = make([]uint64, n) + } + for id := range p.Progress { + n-- + ids[n] = id + } + insertionSort(ids) + for _, id := range ids { + f(id, p.Progress[id]) + } +} + +// QuorumActive returns true if the quorum is active from the view of the local +// raft state machine. Otherwise, it returns false. +func (p *ProgressTracker) QuorumActive() bool { + votes := map[uint64]bool{} + p.Visit(func(id uint64, pr *Progress) { + if pr.IsLearner { + return + } + votes[id] = pr.RecentActive + }) + + return p.Voters.VoteResult(votes) == quorum.VoteWon +} + +// VoterNodes returns a sorted slice of voters. +func (p *ProgressTracker) VoterNodes() []uint64 { + m := p.Voters.IDs() + nodes := make([]uint64, 0, len(m)) + for id := range m { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// LearnerNodes returns a sorted slice of learners. +func (p *ProgressTracker) LearnerNodes() []uint64 { + if len(p.Learners) == 0 { + return nil + } + nodes := make([]uint64, 0, len(p.Learners)) + for id := range p.Learners { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// ResetVotes prepares for a new round of vote counting via recordVote. +func (p *ProgressTracker) ResetVotes() { + p.Votes = map[uint64]bool{} +} + +// RecordVote records that the node with the given id voted for this Raft +// instance if v == true (and declined it otherwise). +func (p *ProgressTracker) RecordVote(id uint64, v bool) { + _, ok := p.Votes[id] + if !ok { + p.Votes[id] = v + } +} + +// TallyVotes returns the number of granted and rejected Votes, and whether the +// election outcome is known. +func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) { + // Make sure to populate granted/rejected correctly even if the Votes slice + // contains members no longer part of the configuration. This doesn't really + // matter in the way the numbers are used (they're informational), but might + // as well get it right. + for id, pr := range p.Progress { + if pr.IsLearner { + continue + } + v, voted := p.Votes[id] + if !voted { + continue + } + if v { + granted++ + } else { + rejected++ + } + } + result := p.Voters.VoteResult(p.Votes) + return granted, rejected, result +} diff --git a/vendor/go.etcd.io/etcd/raft/util.go b/vendor/go.etcd.io/etcd/raft/util.go new file mode 100644 index 0000000000..785cf735d5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/util.go @@ -0,0 +1,233 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
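As an aside, a minimal sketch of how the flow-control methods on tracker.Progress defined above fit together. This is illustrative only (not part of the vendored sources) and assumes the exported tracker API shown in this patch plus NewInflights from the same package:

package main

import "go.etcd.io/etcd/raft/tracker"

func main() {
	// A leader-side view of one follower, starting in StateProbe.
	pr := &tracker.Progress{
		Next:      1,
		State:     tracker.StateProbe,
		Inflights: tracker.NewInflights(128),
	}
	pr.BecomeReplicate()   // Next = Match+1; appends may now be pipelined
	pr.OptimisticUpdate(5) // entries up to index 5 are in flight, Next = 6
	if pr.MaybeUpdate(5) { // MsgAppResp acked index 5: Match = 5
		// the leader can now recompute the committed index from Match values
	}
	if pr.MaybeDecrTo(8, 3) { // genuine rejection: Next falls back to Match+1
		pr.BecomeProbe() // probe one entry at a time until the follower catches up
	}
}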
+ +package raft + +import ( + "bytes" + "fmt" + "strings" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || + msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// voteResponseType maps vote and prevote message types to their corresponding responses. +func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +func DescribeHardState(hs pb.HardState) string { + var buf strings.Builder + fmt.Fprintf(&buf, "Term:%d", hs.Term) + if hs.Vote != 0 { + fmt.Fprintf(&buf, " Vote:%d", hs.Vote) + } + fmt.Fprintf(&buf, " Commit:%d", hs.Commit) + return buf.String() +} + +func DescribeSoftState(ss SoftState) string { + return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState) +} + +func DescribeConfState(state pb.ConfState) string { + return fmt.Sprintf( + "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v", + state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave, + ) +} + +func DescribeSnapshot(snap pb.Snapshot) string { + m := snap.Metadata + return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState)) +} + +func DescribeReady(rd Ready, f EntryFormatter) string { + var buf strings.Builder + if rd.SoftState != nil { + fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState)) + buf.WriteByte('\n') + } + if !IsEmptyHardState(rd.HardState) { + fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState)) + buf.WriteByte('\n') + } + if len(rd.ReadStates) > 0 { + fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates) + } + if len(rd.Entries) > 0 { + buf.WriteString("Entries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.Entries, f)) + } + if !IsEmptySnap(rd.Snapshot) { + fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot)) + } + if len(rd.CommittedEntries) > 0 { + buf.WriteString("CommittedEntries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f)) + } + if len(rd.Messages) > 0 { + buf.WriteString("Messages:\n") + for _, msg := range rd.Messages { + fmt.Fprint(&buf, DescribeMessage(msg, f)) + buf.WriteByte('\n') + } + } + if buf.Len() > 0 { + return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String()) + } + return "" +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. 
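A small illustrative example (editor's sketch, not part of the vendored files) of the EntryFormatter hook described above, plugging a hex formatter into DescribeEntries:

package main

import (
	"encoding/hex"
	"fmt"

	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
	// Hex-encode entry payloads instead of the default %q formatting.
	f := func(data []byte) string { return hex.EncodeToString(data) }
	ents := []pb.Entry{{Term: 1, Index: 2, Type: pb.EntryNormal, Data: []byte("foo")}}
	fmt.Print(raft.DescribeEntries(ents, f))
	// Prints: 1/2 EntryNormal 666f6f
}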
+func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint) + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot)) + } + return buf.String() +} + +// PayloadSize is the size of the payload of this Entry. Notably, it does not +// depend on its Index or Term. +func PayloadSize(e pb.Entry) int { + return len(e.Data) +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. +func DescribeEntry(e pb.Entry, f EntryFormatter) string { + if f == nil { + f = func(data []byte) string { return fmt.Sprintf("%q", data) } + } + + formatConfChange := func(cc pb.ConfChangeI) string { + // TODO(tbg): give the EntryFormatter a type argument so that it gets + // a chance to expose the Context. + return pb.ConfChangesToString(cc.AsV2().Changes) + } + + var formatted string + switch e.Type { + case pb.EntryNormal: + formatted = f(e.Data) + case pb.EntryConfChange: + var cc pb.ConfChange + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + case pb.EntryConfChangeV2: + var cc pb.ConfChangeV2 + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + } + if formatted != "" { + formatted = " " + formatted + } + return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted) +} + +// DescribeEntries calls DescribeEntry for each Entry, adding a newline to +// each. +func DescribeEntries(ents []pb.Entry, f EntryFormatter) string { + var buf bytes.Buffer + for _, e := range ents { + _, _ = buf.WriteString(DescribeEntry(e, f) + "\n") + } + return buf.String() +} + +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} + +func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) { + err := cs1.Equivalent(cs2) + if err == nil { + return + } + l.Panic(err) +} diff --git a/vendor/go.etcd.io/etcd/version/version.go b/vendor/go.etcd.io/etcd/version/version.go new file mode 100644 index 0000000000..ee97e461e7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. 
+package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.4.13" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 0000000000..d3596ee66f --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,752 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. 
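For orientation, a sketch (not part of the vendored files) of the Builder API above producing a DER SEQUENCE of two INTEGERs; the minimal-length signed encoding comes from addASN1Signed:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	var b cryptobyte.Builder // the zero value allocates as needed
	b.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
		seq.AddASN1Int64(1)    // 02 01 01
		seq.AddASN1Int64(-129) // 02 02 ff 7f (two's complement, minimal length)
	})
	fmt.Printf("%x\n", b.BytesOrPanic()) // 30070201010202ff7f
}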
+func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. 
+func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer or to a big.Int, it panics. It reports whether the +// read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + if reflect.TypeOf(out).Kind() != reflect.Ptr { + panic("out is not a pointer") + } + switch reflect.ValueOf(out).Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case reflect.Struct: + if reflect.TypeOf(out).Elem() == bigIntType { + return s.readASN1BigInt(out.(*big.Int)) + } + } + panic("out does not point to an integer type") +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. 
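The reading side mirrors the Builder; an illustrative sketch (not vendored code) decoding the INTEGER produced above with ReadASN1Integer:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var s cryptobyte.String = []byte{0x02, 0x02, 0xff, 0x7f} // DER INTEGER -129
	var v int64
	if !s.ReadASN1Integer(&v) || !s.Empty() {
		panic("malformed INTEGER")
	}
	fmt.Println(v) // -129
}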
+func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 4 { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := uint8(bytes[0]) + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. 
+ return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 0000000000..cda8e3edfd --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 0000000000..ca7b1db5ce --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,337 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. 
+func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. 
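A round-trip sketch (editor's illustration, not part of the vendored files) of the length-prefixed Builder/String pair described above:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddUint16LengthPrefixed(func(body *cryptobyte.Builder) {
		body.AddBytes([]byte("hello"))
	})
	msg := b.BytesOrPanic() // 00 05 68 65 6c 6c 6f

	var s cryptobyte.String = msg
	var body cryptobyte.String
	if s.ReadUint16LengthPrefixed(&body) && s.Empty() {
		fmt.Printf("%s\n", body) // hello
	}
}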
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) 
+ childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back n bytes written directly to the Builder. An attempt by a +// child builder passed to a continuation to unwrite bytes from its parent will +// panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 0000000000..589d297e6b --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,161 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. 
It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. 
It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go new file mode 100644 index 0000000000..f38797bfa1 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go new file mode 100644 index 0000000000..0cc4a8a642 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
+func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go new file mode 100644 index 0000000000..a98d1bd45c --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package secretbox encrypts and authenticates small messages. + +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. +*/ +package secretbox // import "golang.org/x/crypto/nacl/secretbox" + +import ( + "golang.org/x/crypto/internal/subtle" + "golang.org/x/crypto/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. 
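The secretbox package comment above describes the intended usage; here is a minimal Seal/Open round trip (illustrative sketch, not part of the vendored files), with the nonce prepended to the box so the receiver can recover it:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Prepend the nonce so the receiver can split it off again.
	sealed := secretbox.Seal(nonce[:], []byte("attack at dawn"), &nonce, &key)

	var recvNonce [24]byte
	copy(recvNonce[:], sealed[:24])
	plain, ok := secretbox.Open(nil, sealed[24:], &recvNonce, &key)
	fmt.Println(ok, string(plain)) // true attack at dawn
}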
+func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + if subtle.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. + firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { + if len(box) < Overhead { + return nil, false + } + + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + var tag [poly1305.TagSize]byte + copy(tag[:], box) + + if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { + return nil, false + } + + ret, out := sliceForAppend(out, len(box)-Overhead) + if subtle.AnyOverlap(out, box) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of box with the keystream generated from + // the first block. + box = box[Overhead:] + firstMessageBlock := box + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + + box = box[len(firstMessageBlock):] + out = out[len(firstMessageBlock):] + + // Now decrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, box, &counter, &subKey) + + return ret, true +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go new file mode 100644 index 0000000000..157a69f61b --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_compat.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package poly1305 + +// Generic fallbacks for the math/bits intrinsics, copied from +// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had +// variable time fallbacks until Go 1.13. 
+ +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + sum = x + y + carry + carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 + return +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + diff = x - y - borrow + borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 + return +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go new file mode 100644 index 0000000000..a0a185f0fc --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package poly1305 + +import "math/bits" + +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + return bits.Add64(x, y, carry) +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + return bits.Sub64(x, y, borrow) +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + return bits.Mul64(x, y) +} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go new file mode 100644 index 0000000000..d118f30ed5 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!ppc64le,!s390x gccgo purego + +package poly1305 + +type mac struct{ macGeneric } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go new file mode 100644 index 0000000000..9d7a6af09f --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -0,0 +1,99 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was +// used with a fixed key in order to generate one-time keys from an nonce. +// However, in this package AES isn't used and the one-time key is specified +// directly. +package poly1305 // import "golang.org/x/crypto/poly1305" + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
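+//
+// A minimal illustrative sketch (the key must be a fresh, single-use value
+// obtained from a secure source; names here are arbitrary):
+//
+//	var key [32]byte
+//	var tag [16]byte
+//	Sum(&tag, []byte("message"), &key)
+//	ok := Verify(&tag, []byte("message"), &key) // true for an untampered message and matching key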
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := New(key) + h.Write(m) + h.Sum(out[:0]) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + m := &MAC{} + initialize(key, &m.macState) + return m +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum or Verify causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum or Verify. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum or Verify") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 0000000000..99e5a1d50e --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,47 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
+type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 0000000000..8d394a212e --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,108 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go new file mode 100644 index 0000000000..c942a65904 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -0,0 +1,310 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides the generic implementation of Sum and MAC. Other files +// might provide optimized assembly implementations of some of this code. 
+ +package poly1305 + +import "encoding/binary" + +// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag +// for a 64 bytes message is approximately +// +// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 +// +// for some secret r and s. It can be computed sequentially like +// +// for len(msg) > 0: +// h += read(msg, 16) +// h *= r +// h %= 2¹³⁰ - 5 +// return h + s +// +// All the complexity is about doing performant constant-time math on numbers +// larger than any available numeric type. + +func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) macGeneric { + m := macGeneric{} + initialize(key, &m.macState) + return m +} + +// macState holds numbers in saturated 64-bit little-endian limbs. That is, +// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. +type macState struct { + // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but + // can grow larger during and after rounds. It must, however, remain below + // 2 * (2¹³⁰ - 5). + h [3]uint64 + // r and s are the private key components. + r [2]uint64 + s [2]uint64 +} + +type macGeneric struct { + macState + + buffer [TagSize]byte + offset int +} + +// Write splits the incoming message into TagSize chunks, and passes them to +// update. It buffers incomplete chunks. +func (h *macGeneric) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + updateGeneric(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + updateGeneric(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +// Sum flushes the last incomplete chunk from the buffer, if any, and generates +// the MAC output. It does not modify its state, in order to allow for multiple +// calls to Sum, even if no Write is allowed after Sum. +func (h *macGeneric) Sum(out *[TagSize]byte) { + state := h.macState + if h.offset > 0 { + updateGeneric(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} + +// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It +// clears some bits of the secret coefficient to make it possible to implement +// multiplication more efficiently. +const ( + rMask0 = 0x0FFFFFFC0FFFFFFF + rMask1 = 0x0FFFFFFC0FFFFFFC +) + +// initialize loads the 256-bit key into the two 128-bit secret values r and s. +func initialize(key *[32]byte, m *macState) { + m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + m.s[0] = binary.LittleEndian.Uint64(key[16:24]) + m.s[1] = binary.LittleEndian.Uint64(key[24:32]) +} + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +func mul64(a, b uint64) uint128 { + hi, lo := bitsMul64(a, b) + return uint128{lo, hi} +} + +func add128(a, b uint128) uint128 { + lo, c := bitsAdd64(a.lo, b.lo, 0) + hi, c := bitsAdd64(a.hi, b.hi, c) + if c != 0 { + panic("poly1305: unexpected overflow") + } + return uint128{lo, hi} +} + +func shiftRightBy2(a uint128) uint128 { + a.lo = a.lo>>2 | (a.hi&3)<<62 + a.hi = a.hi >> 2 + return a +} + +// updateGeneric absorbs msg into the state.h accumulator. 
For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. +func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. + if len(msg) >= TagSize { + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. + m3 := h2r1 + + t0 := m0.lo + t1, c := bitsAdd64(m1.lo, m0.hi, 0) + t2, c := bitsAdd64(m2.lo, m1.hi, c) + t3, _ := bitsAdd64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. 
+ // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. + h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +// +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bitsSub64(h0, p0, 0) + hMinusP1, b := bitsSub64(h1, p1, b) + _, b = bitsSub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bitsAdd64(h0, s[0], 0) + h1, _ = bitsAdd64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go new file mode 100644 index 0000000000..2e7a120b19 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -0,0 +1,47 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
+type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s new file mode 100644 index 0000000000..4e02813879 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -0,0 +1,181 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP $0, R5 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP $0, R17 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. 
+ BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP $0, R5 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDE $0, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go new file mode 100644 index 0000000000..958fedc079 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -0,0 +1,75 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// updateVX is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +//go:noescape +func updateVX(state *macState, msg []byte) + +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. + if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s new file mode 100644 index 0000000000..0fa9ee6e0b --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -0,0 +1,503 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !gccgo,!purego + +#include "textflag.h" + +// This implementation of Poly1305 uses the vector facility (vx) +// to process up to 2 blocks (32 bytes) per iteration using an +// algorithm based on the one described in: +// +// NEON crypto, Daniel J. Bernstein & Peter Schwabe +// https://cryptojedi.org/papers/neoncrypto-20120320.pdf +// +// This algorithm uses 5 26-bit limbs to represent a 130-bit +// value. These limbs are, for the most part, zero extended and +// placed into 64-bit vector register elements. Each vector +// register is 128-bits wide and so holds 2 of these elements. +// Using 26-bit limbs allows us plenty of headroom to accomodate +// accumulations before and after multiplication without +// overflowing either 32-bits (before multiplication) or 64-bits +// (after multiplication). +// +// In order to parallelise the operations required to calculate +// the sum we use two separate accumulators and then sum those +// in an extra final step. For compatibility with the generic +// implementation we perform this summation at the end of every +// updateVX call. +// +// To use two accumulators we must multiply the message blocks +// by r² rather than r. Only the final message block should be +// multiplied by r. +// +// Example: +// +// We want to calculate the sum (h) for a 64 byte message (m): +// +// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r +// +// To do this we split the calculation into the even indices +// and odd indices of the message. These form our SIMD 'lanes': +// +// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 +// m[16:32]r³ + m[48:64]r <- lane 1 +// +// To calculate this iteratively we refactor so that both lanes +// are written in terms of r² and r: +// +// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 +// (m[16:32]r² + m[48:64])r <- lane 1 +// ^ ^ +// | coefficients for second iteration +// coefficients for first iteration +// +// So in this case we would have two iterations. In the first +// both lanes are multiplied by r². In the second only the +// first lane is multiplied by r² and the second lane is +// instead multiplied by r. This gives use the odd and even +// powers of r that we need from the original equation. +// +// Notation: +// +// h - accumulator +// r - key +// m - message +// +// [a, b] - SIMD register holding two 64-bit values +// [a, b, c, d] - SIMD register holding four 32-bit values +// xᵢ[n] - limb n of variable x with bit width i +// +// Limbs are expressed in little endian order, so for 26-bit +// limbs x₂₆[4] will be the most significant limb and x₂₆[0] +// will be the least significant limb. 
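+//
+// As a hedged illustration only (not part of this file), the schedule above
+// can be written as scalar pseudo-Go, assuming the message length is a
+// positive multiple of 32 bytes, r2 = r·r, and all arithmetic is carried
+// out mod 2¹³⁰ - 5:
+//
+//	for len(m) > 32 {
+//		h0 = (h0 + m[0:16]) * r2
+//		h1 = (h1 + m[16:32]) * r2
+//		m = m[32:]
+//	}
+//	h0 = (h0 + m[0:16]) * r2 // final iteration: lane 0 still uses r²,
+//	h1 = (h1 + m[16:32]) * r // lane 1 switches to r
+//	h = h0 + h1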
+ +// masking constants +#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits +#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits + +// expansion constants (see EXPAND macro) +#define EX0 V2 +#define EX1 V3 +#define EX2 V4 + +// key (r², r or 1 depending on context) +#define R_0 V5 +#define R_1 V6 +#define R_2 V7 +#define R_3 V8 +#define R_4 V9 + +// precalculated coefficients (5r², 5r or 0 depending on context) +#define R5_1 V10 +#define R5_2 V11 +#define R5_3 V12 +#define R5_4 V13 + +// message block (m) +#define M_0 V14 +#define M_1 V15 +#define M_2 V16 +#define M_3 V17 +#define M_4 V18 + +// accumulator (h) +#define H_0 V19 +#define H_1 V20 +#define H_2 V21 +#define H_3 V22 +#define H_4 V23 + +// temporary registers (for short-lived values) +#define T_0 V24 +#define T_1 V25 +#define T_2 V26 +#define T_3 V27 +#define T_4 V28 + +GLOBL ·constants<>(SB), RODATA, $0x30 +// EX0 +DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 +DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d + +// MULTIPLY multiplies each lane of f and g, partially reduced +// modulo 2¹³⁰ - 5. The result, h, consists of partial products +// in each lane that need to be reduced further to produce the +// final result. +// +// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ +// +// Note that the multiplication by 5 of the high bits is +// achieved by precalculating the multiplication of four of the +// g coefficients by 5. These are g51-g54. +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ + VAG T_0, h0, h0 \ + VAG T_3, h3, h3 \ + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 + +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. 
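+//
+// Note that the carry out of h₂₆[4] represents multiples of 2¹³⁰, so before
+// it is folded back into h₂₆[0] it is multiplied by 5 (a shift by 2 and an
+// add, t + 4t, in the macro below), applying the identity
+// 2¹³⁰ ≡ 5 mod 2¹³⁰ - 5.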
+#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ + VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// EXPAND splits the 128-bit little-endian values in0 and in1 +// into 26-bit big-endian limbs and places the results into +// the first and second lane of d₂₆[0:4] respectively. +// +// The EX0, EX1 and EX2 constants are arrays of byte indices +// for permutation. The permutation both reverses the bytes +// in the input and ensures the bytes are copied into the +// destination limb ready to be shifted into their final +// position. +#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VPERM in0, in1, EX2, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] + VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] + VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] + VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] + VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] + +// func updateVX(state *macState, msg []byte) +TEXT ·updateVX(SB), NOSPLIT, $0 + MOVD state+0(FP), R1 + LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // generate masks + VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] + VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] + + // load h (accumulator) and r (key) from state + VZERO T_1 // [0, 0] + VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] + VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] + VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] + VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] + VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] + + // unpack h and r into 26-bit limbs + // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value + VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] + VZERO H_1 // [0, 0] + VZERO H_3 // [0, 0] + VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out + VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] + VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] + VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only + VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] + VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only + VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete + VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete + + // replicate r across all 4 vector elements + VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] + VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] + VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] + VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] + VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] + + // zero out lane 1 of h + VLEIG $1, $0, H_0 // [h₂₆[0], 0] + VLEIG $1, $0, H_1 // [h₂₆[1], 0] + VLEIG $1, $0, H_2 // [h₂₆[2], 0] + VLEIG $1, $0, H_3 // [h₂₆[3], 0] + VLEIG $1, $0, H_4 // [h₂₆[4], 0] + + // calculate 5r (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] + VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block + CMPBLE R3, $16, skip + + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] + + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] + +loop: + CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 + +multiply: + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products + REDUCE(H_0, H_1, H_2, H_3, H_4) + + CMPBNE R3, $0, loop + +finish: + // sum lane 0 and lane 1 and put the result in lane 1 + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 + VSUMQG H_4, T_0, H_4 + VSUMQG H_2, T_0, H_2 + + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG $26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) + RET + +b2: // 2 or fewer blocks remaining + CMPBLE R3, $16, b1 + + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. 
If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) + CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long + VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 + + // Split both blocks into 26-bit limbs in the appropriate lanes. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the second to last block. + VLEIB $4, $1, M_4 + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, M_4 + + // Finally, set up the coefficients for the final multiplication. + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r² so that can be kept the + // same. We want lane 1 to be multiplied by r so we need to move + // the saved r value into the 32-bit odd index in lane 1 by + // rotating the 64-bit lane by 32. + VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only + VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] + VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] + VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] + VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] + VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] + VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] + VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] + VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] + VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] + + MOVD $0, R3 + BR multiply + +skip: + CMPBEQ R3, $0, finish + +b1: // 1 block remaining + + // Load the final block (1-16 bytes). This will be placed into + // lane 0. + MOVD $-1(R3), R0 + VLL R0, (R2), T_0 // pad to 16 bytes with zeros + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_0 + + // Set the message block in lane 1 to the value 0 so that it + // can be accumulated without affecting the final result. + VZERO T_1 + + // Split the final message block into 26-bit limbs in lane 0. + // Lane 1 will be contain 0. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, M_4 + + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r so we need to move the + // saved r value into the 32-bit odd index in lane 0. We want + // lane 1 to be set to the value 1. This makes multiplication + // a no-op. We do this by setting lane 1 in every register to 0 + // and then just setting the 32-bit index 3 in R_0 to 1. 
+ VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. + VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] + + MOVD $0, R3 + BR multiply diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 0000000000..4c96147c86 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa // import "golang.org/x/crypto/salsa20/salsa" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. +func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | 
u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 0000000000..9bfc0927ce --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. 
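+//
+// Illustrative sketch only: because in and out may alias, the core can be
+// applied in place, e.g.
+//
+//	var block [64]byte
+//	Core208(&block, &block)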
+func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) 
+ out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 0000000000..656e8df942 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +package salsa + +//go:noescape + +// salsa2020XORKeyStream is implemented in salsa20_amd64.s. +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s new file mode 100644 index 0000000000..18085d2e8c --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -0,0 +1,883 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) +// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size. 
+TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment + MOVQ out+0(FP),DI + MOVQ in+8(FP),SI + MOVQ n+16(FP),DX + MOVQ nonce+24(FP),CX + MOVQ key+32(FP),R8 + + MOVQ SP,R12 + MOVQ SP,R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ DX,R9 + MOVQ CX,DX + MOVQ R8,R10 + CMPQ R9,$0 + JBE DONE + START: + MOVL 20(R10),CX + MOVL 0(R10),R8 + MOVL 0(DX),AX + MOVL 16(R10),R11 + MOVL CX,0(SP) + MOVL R8, 4 (SP) + MOVL AX, 8 (SP) + MOVL R11, 12 (SP) + MOVL 8(DX),CX + MOVL 24(R10),R8 + MOVL 4(R10),AX + MOVL 4(DX),R11 + MOVL CX,16(SP) + MOVL R8, 20 (SP) + MOVL AX, 24 (SP) + MOVL R11, 28 (SP) + MOVL 12(DX),CX + MOVL 12(R10),DX + MOVL 28(R10),R8 + MOVL 8(R10),AX + MOVL DX,32(SP) + MOVL CX, 36 (SP) + MOVL R8, 40 (SP) + MOVL AX, 44 (SP) + MOVQ $1634760805,DX + MOVQ $857760878,CX + MOVQ $2036477234,R8 + MOVQ $1797285236,AX + MOVL DX,48(SP) + MOVL CX, 52 (SP) + MOVL R8, 56 (SP) + MOVL AX, 60 (SP) + CMPQ R9,$256 + JB BYTESBETWEEN1AND255 + MOVOA 48(SP),X0 + PSHUFL $0X55,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X3 + PSHUFL $0X00,X0,X0 + MOVOA X1,64(SP) + MOVOA X2,80(SP) + MOVOA X3,96(SP) + MOVOA X0,112(SP) + MOVOA 0(SP),X0 + PSHUFL $0XAA,X0,X1 + PSHUFL $0XFF,X0,X2 + PSHUFL $0X00,X0,X3 + PSHUFL $0X55,X0,X0 + MOVOA X1,128(SP) + MOVOA X2,144(SP) + MOVOA X3,160(SP) + MOVOA X0,176(SP) + MOVOA 16(SP),X0 + PSHUFL $0XFF,X0,X1 + PSHUFL $0X55,X0,X2 + PSHUFL $0XAA,X0,X0 + MOVOA X1,192(SP) + MOVOA X2,208(SP) + MOVOA X0,224(SP) + MOVOA 32(SP),X0 + PSHUFL $0X00,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X0 + MOVOA X1,240(SP) + MOVOA X2,256(SP) + MOVOA X0,272(SP) + BYTESATLEAST256: + MOVL 16(SP),DX + MOVL 36 (SP),CX + MOVL DX,288(SP) + MOVL CX,304(SP) + SHLQ $32,CX + ADDQ CX,DX + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 292 (SP) + MOVL CX, 308 (SP) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 296 (SP) + MOVL CX, 312 (SP) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 300 (SP) + MOVL CX, 316 (SP) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX,16(SP) + MOVL CX, 36 (SP) + MOVQ R9,352(SP) + MOVQ $20,DX + MOVOA 64(SP),X0 + MOVOA 80(SP),X1 + MOVOA 96(SP),X2 + MOVOA 256(SP),X3 + MOVOA 272(SP),X4 + MOVOA 128(SP),X5 + MOVOA 144(SP),X6 + MOVOA 176(SP),X7 + MOVOA 192(SP),X8 + MOVOA 208(SP),X9 + MOVOA 224(SP),X10 + MOVOA 304(SP),X11 + MOVOA 112(SP),X12 + MOVOA 160(SP),X13 + MOVOA 240(SP),X14 + MOVOA 288(SP),X15 + MAINLOOP1: + MOVOA X1,320(SP) + MOVOA X2,336(SP) + MOVOA X13,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X14 + PSRLL $25,X2 + PXOR X2,X14 + MOVOA X7,X1 + PADDL X0,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X11 + PSRLL $25,X2 + PXOR X2,X11 + MOVOA X12,X1 + PADDL X14,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X15 + PSRLL $23,X2 + PXOR X2,X15 + MOVOA X0,X1 + PADDL X11,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X9 + PSRLL $23,X2 + PXOR X2,X9 + MOVOA X14,X1 + PADDL X15,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X13 + PSRLL $19,X2 + PXOR X2,X13 + MOVOA X11,X1 + PADDL X9,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X7 + PSRLL $19,X2 + PXOR X2,X7 + MOVOA X15,X1 + PADDL X13,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA 320(SP),X1 + MOVOA X12,320(SP) + MOVOA X9,X2 + PADDL X7,X2 + MOVOA X2,X12 + PSLLL $18,X2 + PXOR X2,X0 + PSRLL $14,X12 + PXOR X12,X0 + MOVOA X5,X2 + PADDL X1,X2 + MOVOA X2,X12 + PSLLL $7,X2 + PXOR X2,X3 + PSRLL $25,X12 + PXOR X12,X3 + MOVOA 336(SP),X2 + MOVOA X0,336(SP) + MOVOA X6,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X4 + PSRLL $25,X12 + PXOR X12,X4 + MOVOA X1,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR 
X0,X10 + PSRLL $23,X12 + PXOR X12,X10 + MOVOA X2,X0 + PADDL X4,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X8 + PSRLL $23,X12 + PXOR X12,X8 + MOVOA X3,X0 + PADDL X10,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X5 + PSRLL $19,X12 + PXOR X12,X5 + MOVOA X4,X0 + PADDL X8,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X6 + PSRLL $19,X12 + PXOR X12,X6 + MOVOA X10,X0 + PADDL X5,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA 320(SP),X0 + MOVOA X1,320(SP) + MOVOA X4,X1 + PADDL X0,X1 + MOVOA X1,X12 + PSLLL $7,X1 + PXOR X1,X7 + PSRLL $25,X12 + PXOR X12,X7 + MOVOA X8,X1 + PADDL X6,X1 + MOVOA X1,X12 + PSLLL $18,X1 + PXOR X1,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 336(SP),X12 + MOVOA X2,336(SP) + MOVOA X14,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X5 + PSRLL $25,X2 + PXOR X2,X5 + MOVOA X0,X1 + PADDL X7,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X10 + PSRLL $23,X2 + PXOR X2,X10 + MOVOA X12,X1 + PADDL X5,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X8 + PSRLL $23,X2 + PXOR X2,X8 + MOVOA X7,X1 + PADDL X10,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X4 + PSRLL $19,X2 + PXOR X2,X4 + MOVOA X5,X1 + PADDL X8,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X14 + PSRLL $19,X2 + PXOR X2,X14 + MOVOA X10,X1 + PADDL X4,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X0 + PSRLL $14,X2 + PXOR X2,X0 + MOVOA 320(SP),X1 + MOVOA X0,320(SP) + MOVOA X8,X0 + PADDL X14,X0 + MOVOA X0,X2 + PSLLL $18,X0 + PXOR X0,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA X11,X0 + PADDL X1,X0 + MOVOA X0,X2 + PSLLL $7,X0 + PXOR X0,X6 + PSRLL $25,X2 + PXOR X2,X6 + MOVOA 336(SP),X2 + MOVOA X12,336(SP) + MOVOA X3,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X13 + PSRLL $25,X12 + PXOR X12,X13 + MOVOA X1,X0 + PADDL X6,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X15 + PSRLL $23,X12 + PXOR X12,X15 + MOVOA X2,X0 + PADDL X13,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X9 + PSRLL $23,X12 + PXOR X12,X9 + MOVOA X6,X0 + PADDL X15,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X11 + PSRLL $19,X12 + PXOR X12,X11 + MOVOA X13,X0 + PADDL X9,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X3 + PSRLL $19,X12 + PXOR X12,X3 + MOVOA X15,X0 + PADDL X11,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA X9,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 320(SP),X12 + MOVOA 336(SP),X0 + SUBQ $2,DX + JA MAINLOOP1 + PADDL 112(SP),X12 + PADDL 176(SP),X7 + PADDL 224(SP),X10 + PADDL 272(SP),X4 + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 0(SI),DX + XORL 4(SI),CX + XORL 8(SI),R8 + XORL 12(SI),R9 + MOVL DX,0(DI) + MOVL CX,4(DI) + MOVL R8,8(DI) + MOVL R9,12(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 64(SI),DX + XORL 68(SI),CX + XORL 72(SI),R8 + XORL 76(SI),R9 + MOVL DX,64(DI) + MOVL CX,68(DI) + MOVL R8,72(DI) + MOVL R9,76(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 128(SI),DX + XORL 132(SI),CX + XORL 136(SI),R8 + XORL 140(SI),R9 + MOVL DX,128(DI) + MOVL CX,132(DI) + MOVL R8,136(DI) + MOVL R9,140(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + XORL 192(SI),DX + XORL 196(SI),CX + XORL 200(SI),R8 + XORL 204(SI),R9 + MOVL DX,192(DI) + MOVL CX,196(DI) + MOVL R8,200(DI) + MOVL R9,204(DI) + PADDL 240(SP),X14 + PADDL 64(SP),X0 + PADDL 128(SP),X5 + PADDL 
192(SP),X8 + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 16(SI),DX + XORL 20(SI),CX + XORL 24(SI),R8 + XORL 28(SI),R9 + MOVL DX,16(DI) + MOVL CX,20(DI) + MOVL R8,24(DI) + MOVL R9,28(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 80(SI),DX + XORL 84(SI),CX + XORL 88(SI),R8 + XORL 92(SI),R9 + MOVL DX,80(DI) + MOVL CX,84(DI) + MOVL R8,88(DI) + MOVL R9,92(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 144(SI),DX + XORL 148(SI),CX + XORL 152(SI),R8 + XORL 156(SI),R9 + MOVL DX,144(DI) + MOVL CX,148(DI) + MOVL R8,152(DI) + MOVL R9,156(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + XORL 208(SI),DX + XORL 212(SI),CX + XORL 216(SI),R8 + XORL 220(SI),R9 + MOVL DX,208(DI) + MOVL CX,212(DI) + MOVL R8,216(DI) + MOVL R9,220(DI) + PADDL 288(SP),X15 + PADDL 304(SP),X11 + PADDL 80(SP),X1 + PADDL 144(SP),X6 + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 32(SI),DX + XORL 36(SI),CX + XORL 40(SI),R8 + XORL 44(SI),R9 + MOVL DX,32(DI) + MOVL CX,36(DI) + MOVL R8,40(DI) + MOVL R9,44(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 96(SI),DX + XORL 100(SI),CX + XORL 104(SI),R8 + XORL 108(SI),R9 + MOVL DX,96(DI) + MOVL CX,100(DI) + MOVL R8,104(DI) + MOVL R9,108(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 160(SI),DX + XORL 164(SI),CX + XORL 168(SI),R8 + XORL 172(SI),R9 + MOVL DX,160(DI) + MOVL CX,164(DI) + MOVL R8,168(DI) + MOVL R9,172(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + XORL 224(SI),DX + XORL 228(SI),CX + XORL 232(SI),R8 + XORL 236(SI),R9 + MOVL DX,224(DI) + MOVL CX,228(DI) + MOVL R8,232(DI) + MOVL R9,236(DI) + PADDL 160(SP),X13 + PADDL 208(SP),X9 + PADDL 256(SP),X3 + PADDL 96(SP),X2 + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 48(SI),DX + XORL 52(SI),CX + XORL 56(SI),R8 + XORL 60(SI),R9 + MOVL DX,48(DI) + MOVL CX,52(DI) + MOVL R8,56(DI) + MOVL R9,60(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 112(SI),DX + XORL 116(SI),CX + XORL 120(SI),R8 + XORL 124(SI),R9 + MOVL DX,112(DI) + MOVL CX,116(DI) + MOVL R8,120(DI) + MOVL R9,124(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 176(SI),DX + XORL 180(SI),CX + XORL 184(SI),R8 + XORL 188(SI),R9 + MOVL DX,176(DI) + MOVL CX,180(DI) + MOVL R8,184(DI) + MOVL R9,188(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + XORL 240(SI),DX + XORL 244(SI),CX + XORL 248(SI),R8 + XORL 252(SI),R9 + MOVL DX,240(DI) + MOVL CX,244(DI) + MOVL R8,248(DI) + MOVL R9,252(DI) + MOVQ 352(SP),R9 + SUBQ $256,R9 + ADDQ $256,SI + ADDQ $256,DI + CMPQ R9,$256 + JAE BYTESATLEAST256 + CMPQ R9,$0 + JBE DONE + BYTESBETWEEN1AND255: + CMPQ R9,$64 + JAE NOCOPY + MOVQ DI,DX + LEAQ 360(SP),DI + MOVQ R9,CX + REP; MOVSB + LEAQ 360(SP),DI + LEAQ 360(SP),SI + NOCOPY: 
+ MOVQ R9,352(SP) + MOVOA 48(SP),X0 + MOVOA 0(SP),X1 + MOVOA 16(SP),X2 + MOVOA 32(SP),X3 + MOVOA X1,X4 + MOVQ $20,CX + MAINLOOP2: + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + SUBQ $4,CX + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PXOR X7,X7 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + JA MAINLOOP2 + PADDL 48(SP),X0 + PADDL 0(SP),X1 + PADDL 16(SP),X2 + PADDL 32(SP),X3 + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 0(SI),CX + XORL 48(SI),R8 + XORL 32(SI),R9 + XORL 16(SI),AX + MOVL CX,0(DI) + MOVL R8,48(DI) + MOVL R9,32(DI) + MOVL AX,16(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 20(SI),CX + XORL 4(SI),R8 + XORL 52(SI),R9 + XORL 36(SI),AX + MOVL CX,20(DI) + MOVL R8,4(DI) + MOVL R9,52(DI) + MOVL AX,36(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 40(SI),CX + XORL 24(SI),R8 + XORL 8(SI),R9 + XORL 56(SI),AX + MOVL CX,40(DI) + MOVL R8,24(DI) + MOVL R9,8(DI) + MOVL AX,56(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + XORL 60(SI),CX + XORL 44(SI),R8 + XORL 28(SI),R9 + XORL 12(SI),AX + MOVL CX,60(DI) + MOVL R8,44(DI) + MOVL R9,28(DI) + MOVL AX,12(DI) + MOVQ 352(SP),R9 + MOVL 16(SP),CX + MOVL 36 (SP),R8 + ADDQ $1,CX + SHLQ $32,R8 + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $32,R8 + MOVL CX,16(SP) + MOVL R8, 36 (SP) + CMPQ R9,$64 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI,SI + MOVQ DX,DI + MOVQ R9,CX + REP; MOVSB + BYTESATLEAST64: + DONE: + MOVQ R12,SP + RET + BYTESATLEAST65: + SUBQ $64,R9 + ADDQ $64,DI + ADDQ $64,SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go 
b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go new file mode 100644 index 0000000000..8a46bd2b3a --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package salsa + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + genericXORKeyStream(out, in, counter, key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 0000000000..68169c6d68 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,231 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. +func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + 
x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// genericXORKeyStream is the generic implementation of XORKeyStream to be used +// when no assembly implementation is available. +func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000000..dc5225b6d4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. 
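The comment blocks above describe how levels and buckets organize observations; the timeSeries struct that implements this layout follows below. As a hedged illustration of how the exported API defined elsewhere in this file (NewTimeSeries, NewFloat, Add, Recent) fits together: note this is an internal package, so the import only compiles from inside golang.org/x/net (the trace package later in this patch is its real consumer), and the latency values here are illustrative assumptions.

    package main

    import (
    	"fmt"
    	"time"

    	"golang.org/x/net/internal/timeseries"
    )

    func main() {
    	// NewFloat is the provider: it is called whenever a level needs a
    	// fresh bucket, so every bucket holds an independent *Float.
    	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

    	// Record a few hypothetical request latencies (milliseconds).
    	for _, ms := range []float64{12, 7, 31} {
    		v := timeseries.Float(ms)
    		ts.Add(&v)
    	}

    	// Approximate sum of everything observed over the last minute.
    	sum := ts.Recent(time.Minute).(*timeseries.Float).Value()
    	fmt.Println(sum) // 12+7+31 = 50
    }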
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
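ComputeRange itself follows this comment; as it says, results are approximate whenever the requested window does not line up with bucket boundaries. A hedged sketch of RecentList (defined a little further down), which is the usual way to get a per-interval breakdown; since the package is internal, the helper is assumed to live somewhere under golang.org/x/net and the window size is an arbitrary choice.

    package traceutil // hypothetical package inside golang.org/x/net

    import (
    	"time"

    	"golang.org/x/net/internal/timeseries"
    )

    // perSecondSums splits the last ten seconds of a series built with
    // NewFloat into ten one-second sums. Each element is approximate when
    // the window does not align with the underlying bucket boundaries.
    func perSecondSums(ts *timeseries.TimeSeries) []float64 {
    	out := make([]float64, 0, 10)
    	for _, o := range ts.RecentList(10*time.Second, 10) {
    		out = append(out, o.(*timeseries.Float).Value())
    	}
    	return out
    }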
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000000..c646a6952e --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
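NewEventLog above is the entry point for the long-lived-object logs rendered on this page; the sort.Interface methods for eventLogs follow below. A brief hedged sketch of typical use, with a hypothetical connection type standing in for the caller's object.

    package example

    import "golang.org/x/net/trace"

    // conn is a hypothetical long-lived object whose lifecycle we want to
    // see on /debug/events.
    type conn struct {
    	events trace.EventLog
    }

    func dial(addr string) *conn {
    	return &conn{events: trace.NewEventLog("example.Conn", addr)}
    }

    func (c *conn) handle(err error) {
    	if err != nil {
    		// Errorf marks the entry as an error, which moves this log into
    		// the errs<10s / errs<1m ... buckets listed above.
    		c.events.Errorf("request failed: %v", err)
    		return
    	}
    	c.events.Printf("request ok")
    }

    func (c *conn) Close() error {
    	c.events.Finish() // the log must not be used after Finish
    	return nil
    }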
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

+/debug/events
+
+{{range $i, $fam := .Families}}
+	{{$fam}}
+	{{range $j, $bucket := $.Buckets}}
+	{{$n := index $.Counts $i $j}}
+	{{if $n}}{{end}}[{{$n}} {{$bucket.String}}]{{if $n}}{{end}}
+	{{end}}
+{{end}}
+
+{{if $.EventLogs}}
+Family: {{$.Family}}
+
+{{if $.Expanded}}{{end}}[Summary]{{if $.Expanded}}{{end}}
+{{if not $.Expanded}}{{end}}[Expanded]{{if not $.Expanded}}{{end}}
+
+When	Elapsed
+{{range $el := $.EventLogs}}
+	{{$el.When}}	{{$el.ElapsedTime}}	{{$el.Title}}
+	{{if $.Expanded}}
+	{{$el.Stack|trimSpace}}
+	{{range $el.Events}}
+	{{.WhenString}}	{{elapsed .Elapsed}}	.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+	{{end}}
+	{{end}}
+{{end}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000000..9bf4286c79 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
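The HTML rendering constants and helpers start just below; before them, a hedged stand-alone sketch of the power-of-two bucket mapping that log2, getBucket, and bucketBoundary above implement. It mirrors the unexported helpers rather than calling them, since histogram is not exported from this package.

    package main

    import "fmt"

    // bucketFor mirrors getBucket above: values 0 and 1 share bucket 0, and
    // bucket i (i >= 1) covers the half-open range [1<<i, 1<<(i+1)).
    func bucketFor(v int64) int {
    	bits := 0
    	for x := v; x > 0; x >>= 1 {
    		bits++
    	}
    	b := bits - 1
    	if b < 0 {
    		b = 0
    	}
    	if b > 37 { // bucketCount-1, with bucketCount = 38 as above
    		b = 37
    	}
    	return b
    }

    func main() {
    	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
    		fmt.Printf("value %4d -> bucket %d\n", v, bucketFor(v))
    	}
    	// 0,1 -> 0; 2,3 -> 1; 4..7 -> 2; 8 -> 3; 1000 -> 9 (i.e. [512,1024)).
    }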
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+Count: {{.Count}}	Mean: {{printf "%.0f" .Mean}}	StdDev: {{printf "%.0f" .StandardDeviation}}	Median: {{.Median}}
+
+{{range $b := .Buckets}}
+{{if $b}}
+	[{{.Lower}},{{.Upper}})	{{.N}}	{{printf "%#.3f" .Pct}}%	{{printf "%#.3f" .CumulativePct}}%
+{{end}}
+{{end}}
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000000..3ebf6f2daa --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 0000000000..69a4ac7eef --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. +func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 0000000000..2dab943a48 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 0000000000..8cffdd16c9 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. + hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) 
+ frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. + if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. 
+func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. +func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. 
+ err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. + + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. 
+ return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 0000000000..0895dea190 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. + // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. 
+ Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. +type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 0000000000..6c45c73529 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,451 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +// +// This package currently lacks some features found in alternative +// and more actively maintained WebSocket packages: +// +// https://godoc.org/github.com/gorilla/websocket +// https://godoc.org/nhooyr.io/websocket +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB +) + +// ProtocolError represents WebSocket protocol errors. 
+type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). + TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + // Dialer used when opening websocket connections. + Dialer *net.Dialer + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. 
+type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +// IsClientConn reports whether ws is a client-side connection. +func (ws *Conn) IsClientConn() bool { return ws.request == nil } + +// IsServerConn reports whether ws is a server-side connection. +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. 
+func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. +func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. 
+ +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
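The Conn and Codec API vendored above is easiest to follow with a short usage sketch. The following program is illustrative only and is not part of the vendored files; the endpoint URL, origin, and the event type are placeholder assumptions, while websocket.Dial, websocket.Message, websocket.JSON, MaxPayloadBytes, and ErrFrameTooLarge are the exported names documented in the vendored code above.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/websocket"
)

// event is a placeholder payload type for the JSON codec.
type event struct {
	Msg   string
	Count int
}

func main() {
	// Placeholder endpoint and origin; Dial returns a *websocket.Conn whose
	// Read/Write and codec behaviour is described in the vendored code above.
	ws, err := websocket.Dial("ws://localhost:8080/echo", "", "http://localhost/")
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()

	// Cap the frame size accepted by Codec.Receive; frames larger than this
	// cause Receive to return ErrFrameTooLarge instead of buffering them.
	ws.MaxPayloadBytes = 1 << 20

	// Text frame via the Message codec.
	if err := websocket.Message.Send(ws, "hello"); err != nil {
		log.Fatal(err)
	}

	// JSON frames via the JSON codec.
	if err := websocket.JSON.Send(ws, event{Msg: "ping", Count: 1}); err != nil {
		log.Fatal(err)
	}
	var reply event
	if err := websocket.JSON.Receive(ws, &reply); err != nil {
		log.Fatal(err)
	}
	fmt.Println(reply.Msg, reply.Count)
}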
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 0000000000..690eb85013 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,212 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. 
+ forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. 
+ if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 0000000000..06f84b8555 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 0000000000..dcbb14ef35 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. +func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 0000000000..f77701fe86 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,287 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. 
+ HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 0000000000..464a209cf5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 0000000000..301b752e9c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. +const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 0000000000..87dd5e3021 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,172 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd": + doinit() + default: + // Most platforms don't seem to allow reading these registers. + // + // OpenBSD: + // See https://golang.org/issue/31746 + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 
1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 0000000000..a54436e390 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 0000000000..7b88e865a4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 0000000000..568bcd031a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 0000000000..f7cb46971c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build !gccgo + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 0000000000..53ca8d65c3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 0000000000..aa986f7782 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 0000000000..e363c7d131 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +#include <cpuid.h> +#include <stdint.h> + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +// +// TODO: Replace with a better alternative: +// +// #include <immintrin.h> +// +// #pragma GCC target("xsave") +// +// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { +// unsigned long long x = _xgetbv(0); +// *eax = x & 0xffffffff; +// *edx = (x >> 32) & 0xffffffff; +// } +// +// Note that _xgetbv is defined starting with GCC 8. +void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + __asm(" xorl %%ecx, %%ecx\n" + " xgetbv" + : "=a"(*eax), "=d"(*edx)); +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 0000000000..ba49b91bd3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 0000000000..6fc874f7fe --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !386,!amd64,!amd64p32,!arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 0000000000..2057006dce --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 0000000000..79a38a0b9b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,71 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + if err := readHWCAP(); err != nil { + // failed to read /proc/self/auxv, try reading registers directly + readARM64Registers() + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 0000000000..5a41890053 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,23 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build mips64 mips64le + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 0000000000..42b5d33cb6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 0000000000..99f8a6399e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 0000000000..1517ac61d3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 0000000000..57b5b677de --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 0000000000..cfc1946b7b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 0000000000..ebfb3fc8e7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. 
+ +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+ for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 0000000000..b412efc1bd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 0000000000..16c1c4090e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,!netbsd +// +build arm64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 0000000000..f49fad6778 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux +// +build mips64 mips64le + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 0000000000..d28d675b5f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 0000000000..8b08de341b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 0000000000..5881b8833f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. 
+type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 0000000000..e5037d92e0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 0000000000..5382f2a227 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. 
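The s390x code above indexes facility and function bits in big-endian order, which is easy to misread. This standalone sketch (not part of the vendored files) replays the bitIsSet helper from cpu_s390x.go to show that index 0 is the most significant bit of the first doubleword returned by STFLE or by a KM/KIMD query.

```go
package main

import "fmt"

// bitIsSet mirrors the helper in cpu_s390x.go: bits are numbered in
// big-endian order, so index 0 is the MSB of the first 64-bit word.
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	bits := []uint64{1 << 63, 0} // 128 bits with only bit index 0 set

	fmt.Println(bitIsSet(bits, 0))  // true: the MSB of word 0
	fmt.Println(bitIsSet(bits, 63)) // false: the LSB of word 0
	fmt.Println(bitIsSet(bits, 64)) // false: word 1 is all zeros
}
```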
+ +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 0000000000..48d4293319 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,135 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + // Check if OPMASK and ZMM registers have OS support. 
+ osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, ecx7, edx7 := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) + + X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(28, ebx7) + X86.HasAVX512ER = isSet(27, ebx7) + X86.HasAVX512PF = isSet(26, ebx7) + X86.HasAVX512VL = isSet(31, ebx7) + X86.HasAVX512BW = isSet(30, ebx7) + X86.HasAVX512DQ = isSet(17, ebx7) + X86.HasAVX512IFMA = isSet(21, ebx7) + X86.HasAVX512VBMI = isSet(1, ecx7) + X86.HasAVX5124VNNIW = isSet(2, edx7) + X86.HasAVX5124FMAPS = isSet(3, edx7) + X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) + X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) + X86.HasAVX512VNNI = isSet(11, ecx7) + X86.HasAVX512GFNI = isSet(8, ecx7) + X86.HasAVX512VAES = isSet(9, ecx7) + X86.HasAVX512VBMI2 = isSet(6, ecx7) + X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) + } +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + buf, err := ioutil.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 0000000000..76fbe40b76 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Morever, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. 
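readHWCAP above walks /proc/self/auxv as a flat sequence of (tag, value) machine words. The following self-contained sketch only illustrates that layout on a 64-bit little-endian system; the AT_HWCAP/AT_HWCAP2 tag numbers are the standard Linux values, and the buffer contents are invented for the example.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	_AT_HWCAP  = 16 // standard Linux auxv tag for the HWCAP word
	_AT_HWCAP2 = 26 // standard Linux auxv tag for the HWCAP2 word
)

func main() {
	// Fake auxv contents: two (tag, value) pairs of 64-bit words.
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf[0:], _AT_HWCAP)
	binary.LittleEndian.PutUint64(buf[8:], 0x0fb) // invented capability bits
	binary.LittleEndian.PutUint64(buf[16:], _AT_HWCAP2)
	binary.LittleEndian.PutUint64(buf[24:], 0x2)

	var hwCap, hwCap2 uint64
	for len(buf) >= 16 {
		tag := binary.LittleEndian.Uint64(buf[0:])
		val := binary.LittleEndian.Uint64(buf[8:])
		buf = buf[16:]
		switch tag {
		case _AT_HWCAP:
			hwCap = val
		case _AT_HWCAP2:
			hwCap2 = val
		}
	}
	fmt.Printf("hwcap=%#x hwcap2=%#x\n", hwCap, hwCap2)
}
```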
+ +// +build aix +// +build gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 0000000000..78fe25e86f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +// +build aix,ppc64 +// +build !gccgo + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 0000000000..e79a538846 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,206 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.25.0 +// protoc v3.13.0 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 0000000000..a11e8cbca6 --- /dev/null +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,42 @@ +language: go + +matrix: + include: + - go: 1.13.x + env: VET=1 GO111MODULE=on + - go: 1.13.x + env: RACE=1 GO111MODULE=on + - go: 1.13.x + env: RUN386=1 + - go: 1.13.x + env: GRPC_GO_RETRY=on + - go: 1.13.x + env: TESTEXTRAS=1 + - go: 1.12.x + env: GO111MODULE=on + - go: 1.11.x + env: GO111MODULE=on + - go: 1.9.x + env: GAE=1 + +go_import_path: google.golang.org/grpc + +before_install: + - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi + - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi + - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi + - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi + +install: + - try3() { eval "$*" || eval "$*" || eval "$*"; } + - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' + - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi + - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi + +script: + - set -e + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi + - if [[ -n "${VET}" ]]; then ./vet.sh; fi + - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi + - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi + - make test diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 0000000000..e491a9e7f7 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 0000000000..9d4213ebca --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 0000000000..4f1567e2f9 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,62 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. 
+ +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often times receive PRs that are trying to fix several things at + a time, but only one fix is considered acceptable, nothing gets merged and + both author's & review's time is wasted. Create more PRs to address different + concerns and everyone will be happy. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. If your contribution introduces new dependencies which are NOT + in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a + discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line + to address an issue. PRs with irrelevant changes won't be merged. If you do + want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `make all` to test everything, OR + - `make vet` to catch vet errors + - `make test` to run the tests + - `make testrace` to run tests in race mode + - optional `make testappengine` to run tests with appengine + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 0000000000..d6ff267471 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 0000000000..093c82b3af --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,27 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) +- [canguler](https://github.com/canguler), Google LLC +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 0000000000..410f7d56d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,63 @@ +all: vet test testrace + +build: deps + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: testdeps + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: testdeps + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + +testappengine: testappenginedeps + goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testappenginedeps: + goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +testrace: testdeps + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... 
+ +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + deps \ + proto \ + test \ + testappengine \ + testappenginedeps \ + testdeps \ + testrace \ + updatedeps \ + updatetestdeps \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 0000000000..afbc43db51 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,121 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The Go implementation of [gRPC](https://grpc.io/): A high performance, open +source, general RPC framework that puts mobile and HTTP/2 first. For more +information see the [gRPC Quick Start: +Go](https://grpc.io/docs/quickstart/go.html) guide. + +Installation +------------ + +To install this package, you need to install Go and setup your Go workspace on +your computer. The simplest way to install the library is to run: + +``` +$ go get -u google.golang.org/grpc +``` + +With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in +your source code and `go [build|run|test]` will automatically download the +necessary dependencies ([Go modules +ref](https://github.com/golang/go/wiki/Modules)). + +If you are trying to access grpc-go from within China, please see the +[FAQ](#FAQ) below. + +Prerequisites +------------- +gRPC-Go requires Go 1.9 or later. + +Documentation +------------- +- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API + descriptions. +- Documentation on specific topics can be found in the [Documentation + directory](Documentation/). +- Examples can be found in the [examples directory](examples/). + +Performance +----------- +Performance benchmark data for grpc-go and other languages is maintained in +[this +dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + +Status +------ +General Availability [Google Cloud Platform Launch +Stages](https://cloud.google.com/terms/launch-stages). + +FAQ +--- + +#### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +``` +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + + ``` + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ``` + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. 
Please refer to [this + issue](https://github.com/golang/go/issues/28652) in the golang repo regarding + this concern. + +#### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update proto package, gRPC package and rebuild the proto files: + - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` + - `go get -u google.golang.org/grpc` + - `protoc --go_out=plugins=grpc:. *.proto` + +#### How to turn on logging + +The default logger is controlled by the environment variables. Turn everything +on by setting: + +``` +GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +#### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 0000000000..68ffc62013 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// All APIs in this package are EXPERIMENTAL. +package attributes + +import "fmt" + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing all key/value pairs in kvs. If the +// same key appears multiple times, the last value overwrites all previous +// values for that key. Panics if len(kvs) is not even. +func New(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} + for i := 0; i < len(kvs)/2; i++ { + a.m[kvs[i*2]] = kvs[i*2+1] + } + return a +} + +// WithValues returns a new Attributes containing all key/value pairs in a and +// kvs. Panics if len(kvs) is not even. If the same key appears multiple +// times, the last value overwrites all previous values for that key. To +// remove an existing key, use a nil value. 
+func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + for k, v := range a.m { + n.m[k] = v + } + for i := 0; i < len(kvs)/2; i++ { + n.m[kvs[i*2]] = kvs[i*2+1] + } + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. +func (a *Attributes) Value(key interface{}) interface{} { + return a.m[key] +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 0000000000..ff7c3ee6f4 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This API is EXPERIMENTAL. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 0000000000..0787d0b50c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 0000000000..a8eb0f4760 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,391 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" +) + +// Address represents a server the client connects to. +// +// Deprecated: please use package balancer. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// +// Deprecated: please use package balancer. 
May be removed in a future 1.x release. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +// +// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. 
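Since the v1 `Balancer`/`RoundRobin` API above is deprecated in favor of `balancer/roundrobin`, the usual replacement is to select the `round_robin` policy by name at dial time and to tune reconnection through the `ConnectParams` and `backoff.Config` types shown earlier. This is a minimal sketch; it assumes the `grpc.WithDefaultServiceConfig`, `grpc.WithInsecure`, and `grpc.WithConnectParams` dial options are available in this gRPC version, and the target address is hypothetical.

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Override only MaxDelay, keeping the other DefaultConfig values.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial(
		"dns:///example.com:50051", // hypothetical target
		grpc.WithInsecure(),
		// Select the registered round_robin policy by name.
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
		// Tune connection backoff instead of the deprecated BackoffConfig.
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```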
+func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. 
+func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = status.Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). +type pickFirst struct { + *roundRobin +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 0000000000..9258858ed7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. 
+package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. 
+ CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker V2Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to notify gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + // + // Deprecated: use UpdateState instead + UpdateBalancerState(s connectivity.State, p Picker) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. + CredsBundle credentials.Bundle + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). 
+ Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v1.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +// +// Deprecated: use V2Picker instead +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This functions is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will + // block until UpdateBalancerState() is called and will call pick on the + // new picker. The done function returned from Pick(), if not nil, will be + // called with nil error, no bytes sent and no bytes received. + // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure or implements IsTransientFailure() + // bool, returning true: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. 
+ // - Else (error is other non-nil error): + // - The RPC will fail with the error's status code, or Unknown if it is + // not a status error. + // + // The returned done() function will be called once the rpc has finished, + // with the final status of that RPC. If the SubConn returned is not a + // valid SubConn type, done may not be called. done may be nil if balancer + // doesn't care about the RPC status. + Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error) +} + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +type transientFailureError struct { + error +} + +func (e *transientFailureError) IsTransientFailure() bool { return true } + +// TransientFailureError wraps err in an error implementing +// IsTransientFailure() bool, returning true. +func TransientFailureError(err error) error { + return &transientFailureError{error: err} +} + +// V2Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type V2Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error implements IsTransientFailure() bool, returning true, + // wait for ready RPCs will wait, but non-wait for ready RPCs will be + // terminated with this error's Error() string and status code + // Unavailable. + // + // - Any other errors terminate all RPCs with the code and message + // provided. If the error is not a status error, it will be converted by + // gRPC to a status error with code Unknown. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. 
+ // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateSubConnState will be called instead. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateClientConnState will be called instead. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// V2Balancer is defined for documentation purposes. If a Balancer also +// implements V2Balancer, its UpdateClientConnState method will be called +// instead of HandleResolvedAddrs and its UpdateSubConnState will be called +// instead of HandleSubConnStateChange. +type V2Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. 
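The aggregation rules above can be traced concretely with the exported evaluator; the transition sequence below is hypothetical but follows directly from the counter updates in `RecordTransition`.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	var cse balancer.ConnectivityStateEvaluator

	// Two SubConns leave Idle and start connecting.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING

	// One of them becomes Ready; at least one Ready SubConn wins.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY

	// That SubConn fails; the other is still connecting.
	fmt.Println(cse.RecordTransition(connectivity.Ready, connectivity.TransientFailure)) // CONNECTING
}
```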
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 0000000000..d7d72918ad --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + v2PickerBuilder: bb.v2PickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. 
+ if bb.pickerBuilder != nil { + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + } else { + bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) + } + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + v2Picker balancer.V2Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not implemented") +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.v2Picker, + }) + } +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.Err (log if not nil) once implemented. + // TODO: handle s.ResolverState.ServiceConfig? + if grpclog.V(2) { + grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + } + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range s.ResolverState.Addresses { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. 
+ if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + if b.pickerBuilder != nil { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + } else { + b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) + } + return + } + if b.pickerBuilder != nil { + readySCs := make(map[resolver.Address]balancer.SubConn) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } + } + b.picker = b.pickerBuilder.Build(readySCs) + } else { + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) + } +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not implemented") +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Set or clear the last connection error accordingly. + b.connErr = state.ConnectionError + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) + } +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. 
+} + +func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// NewErrPickerV2 returns a V2Picker that always returns err on Pick(). +func NewErrPickerV2(err error) balancer.V2Picker { + return &errPickerV2{err: err} +} + +type errPickerV2 struct { + err error // Pick() always returns this err. +} + +func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 0000000000..4192918b9e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// V2PickerBuilder creates balancer.V2Picker. +type V2PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.V2Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. +func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return NewBalancerBuilderWithConfig(name, pb, Config{}) +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. 
+ HealthCheck bool +} + +// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. +func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} + +// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. +func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + v2PickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 0000000000..d4d645501c --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { + grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) + } + var scs []balancer.SubConn + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: grpcrand.Intn(len(scs)), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. 
+ subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 0000000000..824f28e740 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,271 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + scBuffer *buffer.Unbounded + done *grpcsync.Event + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + scBuffer: buffer.NewUnbounded(), + done: grpcsync.NewEvent(), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequentially, so the balancer can be implemented +// lock-free. 
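Stepping back to the `base` package and the roundrobin picker shown above: the same helpers can back other picking strategies. The sketch below is a hypothetical random picker; the package name, balancer name, and policy are illustrative, but it relies only on APIs that appear in this vendored code.

```go
// Package randombalancer is a hypothetical example of building a picker on
// top of the base package, mirroring the roundrobin pattern above.
package randombalancer

import (
	"math/rand"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is an illustrative policy name for this example balancer.
const Name = "random_example"

func init() {
	balancer.Register(base.NewBalancerBuilderV2(Name, &pickerBuilder{}, base.Config{HealthCheck: true}))
}

type pickerBuilder struct{}

func (*pickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
	}
	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
	return &picker{subConns: scs}
}

type picker struct {
	// subConns is the snapshot of READY SubConns when this picker was built.
	subConns []balancer.SubConn
}

func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	// Choose a READY SubConn uniformly at random on every pick.
	return balancer.PickResult{SubConn: p.subConns[rand.Intn(len(p.subConns))]}, nil
}
```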
+func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.scBuffer.Get(): + ccb.scBuffer.Load() + if ccb.done.HasFired() { + break + } + ccb.balancerMu.Lock() + su := t.(*scStateUpdate) + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) + } else { + ccb.balancer.HandleSubConnStateChange(su.sc, su.state) + } + ccb.balancerMu.Unlock() + case <-ccb.done.Done(): + } + + if ccb.done.HasFired() { + ccb.balancer.Close() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + return + } + } +} + +func (ccb *ccBalancerWrapper) close() { + ccb.done.Fire() +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.scBuffer.Put(&scStateUpdate{ + sc: sc, + state: s, + err: err, + }) +} + +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + return ub.UpdateClientConnState(*ccs) + } + ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) + return nil +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ccb.balancerMu.Lock() + ub.ResolverError(err) + ccb.balancerMu.Unlock() + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. 
If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(p) + ccb.cc.csMgr.updateState(s) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePickerV2(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs, opts) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 0000000000..db04b08b84 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,334 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. +} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: opts.Target.Endpoint, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + targetAddr string // Target without the scheme. + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} + + // To aggregate the connectivity state. + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. + a := resolver.Address{ + Addr: bw.targetAddr, + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.targetAddr}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. 
Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + oldSC.UpdateAddresses(newAddrs) + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. + del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.RecordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() +} + +// The picker is the balancerWrapper itself. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { + failfast := true // Default failfast is true. 
+ if ss, ok := rpcInfoFromContext(info.Ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return balancer.PickResult{}, toRPCErr(err) + } + if p != nil { + result.Done = func(balancer.DoneInfo) { p() } + defer func() { + if err != nil { + p() + } + }() + } + + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, result.SubConn = range bw.conns { + return result, nil + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + var ok1 bool + result.SubConn, ok1 = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + s, ok2 := bw.connSt[result.SubConn] + if !ok1 || !ok2 { + // This can only happen due to a race where Get() returned an address + // that was subsequently removed by Notify. In this case we should + // retry always. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + switch s.s { + case connectivity.Ready, connectivity.Idle: + return result, nil + case connectivity.Shutdown, connectivity.TransientFailure: + // If the returned sc has been shut down or is in transient failure, + // return error, and this RPC will fail or wait for another picker (if + // non-failfast). + return balancer.PickResult{}, balancer.ErrTransientFailure + default: + // For other states (connecting or unknown), the v1 balancer would + // traditionally wait until ready and then issue the RPC. Returning + // ErrNoSubConnAvailable will be a slight improvement in that it will + // allow the balancer to choose another address in case others are + // connected. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 0000000000..f393bb6618 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. 
+type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +var GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", +} +var GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, +} + +func (x GrpcLogEntry_EventType) String() string { + return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) +} +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +var GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", +} +var GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, +} + +func (x GrpcLogEntry_Logger) String() string { + return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) +} +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. 
+ Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +var Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", +} +var Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, +} + +func (x Address_Type) String() string { + return proto.EnumName(Address_Type_name, int32(x)) +} +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + // The timestamp of the binary log message + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. + SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are valid to be assigned to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. 
+ Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } +func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } +func (*GrpcLogEntry) ProtoMessage() {} +func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} +} +func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) +} +func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) +} +func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +} +func (m *GrpcLogEntry) XXX_Size() int { + return xxx_messageInfo_GrpcLogEntry.Size(m) +} +func (m *GrpcLogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo + +func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *GrpcLogEntry) GetCallId() uint64 { + if m != nil { + return m.CallId + } + return 0 +} + +func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if m != nil { + return m.SequenceIdWithinCall + } + return 0 +} + +func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if m != nil { + return m.Type + } + return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if m != nil { + return m.Logger + } + return GrpcLogEntry_LOGGER_UNKNOWN +} + +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (m *GrpcLogEntry) GetMessage() *Message { + if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (m *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (m *GrpcLogEntry) GetPayloadTruncated() bool { + if m != nil { + return m.PayloadTruncated + } + return false +} + +func (m *GrpcLogEntry) GetPeer() *Address { + if m != nil { + return m.Peer + } + return nil 
+} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } +} + +func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientHeader); err != nil { + return err + } + case *GrpcLogEntry_ServerHeader: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerHeader); err != nil { + return err + } + case *GrpcLogEntry_Message: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Message); err != nil { + return err + } + case *GrpcLogEntry_Trailer: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Trailer); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) + } + return nil +} + +func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GrpcLogEntry) + switch tag { + case 6: // payload.client_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ClientHeader{msg} + return true, err + case 7: // payload.server_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ServerHeader{msg} + return true, err + case 8: // payload.message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Message) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Message{msg} + return true, err + case 9: // payload.trailer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Trailer) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Trailer{msg} + return true, err + default: + return false, nil + } +} + +func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + s := proto.Size(x.ClientHeader) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_ServerHeader: + s := proto.Size(x.ServerHeader) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_Message: + s := proto.Size(x.Message) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_Trailer: + s := proto.Size(x.Trailer) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ClientHeader struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The name of the RPC method, which looks something like: + // // + // Note the leading "/" character. 
+ MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // A single process may be used to run multiple virtual + // servers with different identities. + // The authority is the name of such a server identitiy. + // It is typically a portion of the URI in the form of + // or : . + Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientHeader) Reset() { *m = ClientHeader{} } +func (m *ClientHeader) String() string { return proto.CompactTextString(m) } +func (*ClientHeader) ProtoMessage() {} +func (*ClientHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} +} +func (m *ClientHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientHeader.Unmarshal(m, b) +} +func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) +} +func (dst *ClientHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientHeader.Merge(dst, src) +} +func (m *ClientHeader) XXX_Size() int { + return xxx_messageInfo_ClientHeader.Size(m) +} +func (m *ClientHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ClientHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientHeader proto.InternalMessageInfo + +func (m *ClientHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ClientHeader) GetMethodName() string { + if m != nil { + return m.MethodName + } + return "" +} + +func (m *ClientHeader) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *ClientHeader) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +type ServerHeader struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHeader) Reset() { *m = ServerHeader{} } +func (m *ServerHeader) String() string { return proto.CompactTextString(m) } +func (*ServerHeader) ProtoMessage() {} +func (*ServerHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} +} +func (m *ServerHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHeader.Unmarshal(m, b) +} +func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) +} +func (dst *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(dst, src) +} +func (m *ServerHeader) XXX_Size() int { + return xxx_messageInfo_ServerHeader.Size(m) +} +func (m *ServerHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHeader proto.InternalMessageInfo + +func (m *ServerHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Trailer struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. 
+ StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Trailer) Reset() { *m = Trailer{} } +func (m *Trailer) String() string { return proto.CompactTextString(m) } +func (*Trailer) ProtoMessage() {} +func (*Trailer) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} +} +func (m *Trailer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Trailer.Unmarshal(m, b) +} +func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) +} +func (dst *Trailer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trailer.Merge(dst, src) +} +func (m *Trailer) XXX_Size() int { + return xxx_messageInfo_Trailer.Size(m) +} +func (m *Trailer) XXX_DiscardUnknown() { + xxx_messageInfo_Trailer.DiscardUnknown(m) +} + +var xxx_messageInfo_Trailer proto.InternalMessageInfo + +func (m *Trailer) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Trailer) GetStatusCode() uint32 { + if m != nil { + return m.StatusCode + } + return 0 +} + +func (m *Trailer) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Trailer) GetStatusDetails() []byte { + if m != nil { + return m.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. 
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. 
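For a sense of how these generated types fit together, the following sketch hand-builds a client-header log entry the way a binary-logging sink might receive one. It is illustrative only: the method name, authority, and metadata key are placeholders, and only application metadata appears in Metadata per the filtering rules described above.

package main

import (
	"fmt"

	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	// A CLIENT_HEADER entry carries the application-level metadata plus the
	// method and authority; transport and "grpc-"-prefixed headers are not
	// logged (see the Metadata comment above). All values here are placeholders.
	entry := &pb.GrpcLogEntry{
		CallId:               1,
		SequenceIdWithinCall: 1,
		Type:                 pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               pb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: &pb.ClientHeader{
				MethodName: "/echo.Echo/Ping",
				Authority:  "localhost:50051",
				Metadata: &pb.Metadata{
					Entry: []*pb.MetadataEntry{{Key: "x-request-id", Value: []byte("42")}},
				},
			},
		},
	}
	fmt.Println(entry.GetType(), entry.GetClientHeader().GetMethodName())
}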
+type Metadata struct { + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (dst *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(dst, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetEntry() []*MetadataEntry { + if m != nil { + return m.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } +func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } +func (*MetadataEntry) ProtoMessage() {} +func (*MetadataEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} +} +func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) +} +func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) +} +func (dst *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(dst, src) +} +func (m *MetadataEntry) XXX_Size() int { + return xxx_messageInfo_MetadataEntry.Size(m) +} +func (m *MetadataEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MetadataEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo + +func (m *MetadataEntry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *MetadataEntry) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Address information +type Address struct { + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Address) Reset() { *m = Address{} } +func (m *Address) String() string { return proto.CompactTextString(m) } +func (*Address) ProtoMessage() {} +func (*Address) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} +} +func (m *Address) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Address.Unmarshal(m, b) +} +func (m *Address) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Address.Marshal(b, m, deterministic) +} +func (dst *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(dst, src) +} +func (m *Address) XXX_Size() int { + return xxx_messageInfo_Address.Size(m) +} +func (m *Address) XXX_DiscardUnknown() { + xxx_messageInfo_Address.DiscardUnknown(m) +} + +var xxx_messageInfo_Address proto.InternalMessageInfo + +func (m *Address) GetType() Address_Type { + if m != nil { + return m.Type + } + return Address_TYPE_UNKNOWN +} + +func (m *Address) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Address) GetIpPort() uint32 { + if m != nil { + return m.IpPort + } + return 0 +} + +func init() { + proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") + proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") + proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") + proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") + proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") + proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") + proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") + proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) +} + +func init() { + proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) +} + +var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ + // 900 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, + 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, + 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, + 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, + 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, + 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, + 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, + 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, + 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, + 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, + 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, + 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, + 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, + 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, + 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, + 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, + 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 
0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, + 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, + 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, + 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, + 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, + 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, + 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, + 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, + 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, + 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, + 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, + 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, + 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, + 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, + 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, + 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, + 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, + 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, + 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, + 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, + 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, + 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, + 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, + 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, + 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, + 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, + 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, + 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, + 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, + 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, + 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, + 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, + 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, + 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, + 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, + 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, + 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 
0x28, 0x75, 0x7f, + 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, + 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, + 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, + 0xd4, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 0000000000..9e20e4d385 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 0000000000..f58740b250 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1568 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., OAuth2 token) which requires secure connection on an insecure + // connection. 
+ errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
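As a concrete illustration of the blocking and non-blocking behavior described above, a caller might dial and then issue a unary RPC roughly as follows. This is a minimal sketch: the target address, the method path, and the use of Empty request/response messages are placeholders, not anything defined in this package.

package main

import (
	"context"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// WithBlock turns this into a blocking dial: DialContext returns only once
	// the connection is Ready or ctx expires. Without it the call returns
	// immediately and connections are established in the background.
	cc, err := grpc.DialContext(ctx, "dns:///localhost:50051",
		grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()

	// Invoke sends a unary RPC over the connection; generated stubs call the
	// same method under the hood.
	var reply empty.Empty
	if err := cc.Invoke(ctx, "/echo.Echo/Ping", &empty.Empty{}, &reply); err != nil {
		log.Fatalf("invoke: %v", err)
	}
}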
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtINFO, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + }) + } + cc.csMgr.channelzID = cc.channelzID + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + } else { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + cc.parsedTarget = parseTarget(cc.target) + grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. 
Fallback to default resolver and set Endpoint to + // the original target. + grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) + } + } + + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.blockingpicker.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. 
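A brief sketch of what this chaining means from the caller's side. The interceptor names and target are illustrative; WithUnaryInterceptor and WithChainUnaryInterceptor are the dial options that feed dopts.unaryInt and dopts.chainUnaryInts, so the single interceptor runs before the chained ones.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// tag returns a unary client interceptor that logs around each RPC so the
// chaining order is visible.
func tag(name string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{},
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		log.Printf("%s: before %s", name, method)
		err := invoker(ctx, method, req, reply, cc, opts...)
		log.Printf("%s: after %s", name, method)
		return err
	}
}

func main() {
	// Per RPC, "outer" runs first because WithUnaryInterceptor is prepended to
	// the chain built by chainUnaryClientInterceptors, then "a", then "b".
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(tag("outer")),
		grpc.WithChainUnaryInterceptor(tag("a"), tag("b")),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}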
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID int64 +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if channelz.IsOn() { + channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel Connectivity change to %v", state), + Severity: channelz.CtINFO, + }) + } + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. 
+ Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. 
+ if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + if cc.balancerWrapper != nil { + cc.balancerWrapper.resolverError(err) + } + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + cc.applyServiceConfigAndBalancer(sc, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.balancerWrapper == nil { + var err error + if s.ServiceConfig.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) + } + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + + cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() + if cbn != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. 
+// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + grpclog.Infof("ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + + builder := balancer.Get(name) + if channelz.IsOn() { + if builder == nil { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), + Severity: channelz.CtWarning, + }) + } else { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), + Severity: channelz.CtINFO, + }) + } + } + if builder == nil { + grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. 
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. 
+// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return MethodConfig{} + } + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// This API is EXPERIMENTAL. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. 
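As a rough illustration of how the dial path above fits together, the sketch below dials a made-up target with a made-up logging interceptor; it assumes the usual DialOptions (WithBlock and WithChainUnaryInterceptor, which populate dopts.block and dopts.chainUnaryInts) and is not part of the vendored diff.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logInterceptor is a hypothetical unary client interceptor that logs the
// method name and latency of each RPC before delegating to the next invoker
// in the chain built by chainUnaryClientInterceptors.
func logInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc %s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// WithBlock makes DialContext wait until the channel is READY (or ctx
	// expires); WithChainUnaryInterceptor feeds the chaining logic above.
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithChainUnaryInterceptor(logInterceptor),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}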
+func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtINFO, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtINFO, + } + } + channelz.AddTraceEvent(cc.channelzID, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + + updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) + ac.state = s + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: updateMsg, + Severity: channelz.CtINFO, + }) + } + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + for i := 0; ; i++ { + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. 
+ dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.transport = nil + ac.mu.Unlock() + + newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) + if err != nil { + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. + b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return + } + continue + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + newTr.Close() + return + } + ac.curAddr = addr + ac.transport = newTr + ac.backoffIdx = 0 + + hctx, hcancel := context.WithCancel(ac.ctx) + ac.startHealthCheck(hctx) + ac.mu.Unlock() + + // Block until the created transport is down. And when this happens, + // we restart from the top of the addr list. + <-reconnect.Done() + hcancel() + // restart connecting - the top of the loop will set state to + // CONNECTING. This is against the current connectivity semantics doc, + // however it allows for graceful behavior for RPCs not yet dispatched + // - unfortunate timing would otherwise lead to the RPC failing even + // though the TRANSIENT_FAILURE state (called for by the doc) would be + // instantaneous. + // + // Ideally we should transition to Idle here and block until there is + // RPC activity that leads to the balancer requesting a reconnect of + // the associated SubConn. + } +} + +// tryAllAddrs tries to creates a connection to the addresses, and stop when at the +// first successful one. It returns the transport, the address and a Event in +// the successful case. The Event fires when the returned transport disconnects. 
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + var firstConnErr error + for _, addr := range addrs { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return nil, resolver.Address{}, nil, errConnClosing + } + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), + Severity: channelz.CtINFO, + }) + } + + newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + if err == nil { + return newTr, addr, reconnect, nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.blockingpicker.updateConnectionError(err) + } + + // Couldn't connect to any address. + return nil, resolver.Address{}, nil, firstConnErr +} + +// createTransport creates a connection to addr. It returns the transport and a +// Event in the successful case. The Event fires when the returned transport +// disconnects. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { + prefaceReceived := make(chan struct{}) + onCloseCalled := make(chan struct{}) + reconnect := grpcsync.NewEvent() + + authority := ac.cc.authority + // addr.ServerName takes precedent over ClientConn authority, if present. + if addr.ServerName != "" { + authority = addr.ServerName + } + + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: authority, + } + + once := sync.Once{} + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + reconnect.Fire() + } + + onClose := func() { + ac.mu.Lock() + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + close(onCloseCalled) + reconnect.Fire() + } + + onPrefaceReceipt := func() { + close(prefaceReceived) + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + return nil, nil, err + } + + select { + case <-time.After(time.Until(connectDeadline)): + // We didn't get the preface in time. + newTr.Close() + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. 
Reconnecting...", addr) + return nil, nil, errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. + case <-onCloseCalled: + // The transport has already closed - noop. + return nil, nil, errors.New("connection closed") + // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + } + return newTr, reconnect, nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + grpclog.Error("Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", + Severity: channelz.CtError, + }) + } + grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. 
+func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready && ac.transport != nil { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + +// tearDown starts to tear down the addrConn. +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop. +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. + ac.mu.Unlock() + curTr.GracefulClose() + ac.mu.Lock() + } + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Deleted", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(ac.channelzID) + } + ac.mu.Unlock() +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. 
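A small sketch of how the exported connectivity surface (GetState and WaitForStateChange, both marked experimental above) can be used to observe the transitions driven by resetTransport. The helper name and package are illustrative, not part of the vendored code.

package grpcutil

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// watchState logs every connectivity state transition of cc until the
// channel reaches SHUTDOWN or ctx expires.
func watchState(ctx context.Context, cc *grpc.ClientConn) {
	for {
		s := cc.GetState()
		log.Printf("channel state: %v", s)
		if s == connectivity.Shutdown {
			return
		}
		// WaitForStateChange returns false when ctx is done.
		if !cc.WaitForStateChange(ctx, s) {
			return
		}
	}
}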
+func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if cc.parsedTarget.Scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(cc.parsedTarget.Scheme) +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 0000000000..1297765478 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 0000000000..4cdc6ba7c0 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. 
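Since the Codec interface in codec.go above is deprecated in favor of encoding.Codec, a registered codec would today look roughly like the hypothetical JSON codec below; the package name and the choice of JSON are illustrative only.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec implements encoding.Codec (the non-deprecated replacement for the
// grpc.Codec interface above) by marshaling messages as JSON.
type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return "json" }

func init() {
	// Once registered, the codec can be selected per call with
	// grpc.CallContentSubtype("json").
	encoding.RegisterCodec(codec{})
}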
+# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 0000000000..0b206a5782 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 0000000000..02738839dd --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. 
+ OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. 
+ // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 0000000000..34ec36fbf6 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "context" + + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). + WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 0000000000..845ce5d216 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,251 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). 
+type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // NoSecurity indicates a connection is insecure. + // The zero SecurityLevel value is invalid for backward compatibility. + NoSecurity SecurityLevel = iota + 1 + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. 
It returns the authenticated + // connection and the corresponding auth information about the connection. + // The auth information should embed CommonAuthInfo to return additional information about + // the credentials. Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + TransportCredentials() TransportCredentials + PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. +type requestInfoKey struct{} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) + return +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. 
+// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() *CommonAuthInfo + } + ri, _ := RequestInfoFromContext(ctx) + if ri.AuthInfo == nil { + return errors.New("unable to obtain SecurityLevel from context") + } + if ci, ok := ri.AuthInfo.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == 0 { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +func init() { + internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) + } +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go new file mode 100644 index 0000000000..ccbf35b331 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go12.go @@ -0,0 +1,30 @@ +// +build go1.12 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +// This init function adds cipher suite constants only defined in Go 1.12. 
+func init() { + cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" + cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" + cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go new file mode 100644 index 0000000000..2f4472becc --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go @@ -0,0 +1,61 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains credentials-internal code. +package internal + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go new file mode 100644 index 0000000000..d4346e9eab --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go @@ -0,0 +1,30 @@ +// +build appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package internal + +import ( + "net" +) + +// WrapSyscallConn returns newConn on appengine. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + return newConn +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 0000000000..28b4f6232d --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,225 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + + "google.golang.org/grpc/credentials/internal" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
+ serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +const alpnProtoStrH2 = "h2" + +func appendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// This API is EXPERIMENTAL. 
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 0000000000..63f5ae21df --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,594 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + // This is used by ccResolverWrapper to backoff between successive calls to + // resolver.ResolveNow(). The user will have no need to configure this, but + // we need to be able to configure this in tests. + resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// This API is EXPERIMENTAL. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. 
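+//
+// Example (editor's illustrative sketch, not part of the upstream gRPC sources;
+// "example.com:443" is a placeholder target): the buffer-size options compose
+// with other DialOptions in a single Dial call.
+//
+//	conn, err := grpc.Dial("example.com:443",
+//		grpc.WithInsecure(),
+//		grpc.WithWriteBufferSize(64*1024),
+//		grpc.WithReadBufferSize(64*1024),
+//	)
+//	if err != nil {
+//		// handle the dial error
+//	}
+//	defer conn.Close()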
+func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and +// WithBalancerName. Will be removed in a future 1.x release. +func WithBalancer(b Balancer) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. 
+// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +// instead. Will be removed in a future 1.x release. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithConnectParams configures the dialer to use the provided ConnectParams. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +// +// This API is EXPERIMENTAL. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Note that transport security is required unless WithInsecure is +// set. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.insecure = true + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. 
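+//
+// Example (editor's illustrative sketch, not part of the upstream gRPC sources;
+// "ca.pem" and the target address are placeholder values): dialing with TLS
+// credentials built from a CA certificate file.
+//
+//	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
+//	if err != nil {
+//		// handle the credential error
+//	}
+//	conn, err := grpc.Dial("server.example.com:443", grpc.WithTransportCredentials(creds))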
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// This API is experimental. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. 
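+//
+// Example (editor's illustrative sketch, not part of the upstream gRPC sources;
+// the agent string and target are placeholders):
+//
+//	conn, err := grpc.Dial(target, grpc.WithInsecure(), grpc.WithUserAgent("my-app/1.2.3"))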
+func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header. This value only works with WithInsecure and has no +// effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// This API is EXPERIMENTAL. +func WithChannelzParentID(id int64) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. 
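+//
+// Example (editor's illustrative sketch, not part of the upstream gRPC sources):
+// ignore resolver-provided service config and rely on a client-supplied default.
+// The JSON literal is a minimal service config selecting the round_robin policy.
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithInsecure(),
+//		grpc.WithDisableServiceConfig(),
+//		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
+//	)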
+func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used. +// 2. Resolver does not return a service config or if the resolver returns an +// invalid service config. +// +// This API is EXPERIMENTAL. +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +// +// Retry support is currently disabled by default, but will be enabled by +// default in the future. Until then, it may be enabled by setting the +// environment variable "GRPC_GO_RETRY" to "on". +// +// This API is EXPERIMENTAL. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// This API is EXPERIMENTAL. +func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + disableRetry: !envconfig.Retry, + healthCheckFunc: internal.HealthCheckFunc, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, + }, + resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, + } +} + +// withGetMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purpose only. +func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} + +// withResolveNowBackoff specifies the function that clientconn uses to backoff +// between successive calls to resolver.ResolveNow(). +// +// For testing purpose only. +func withResolveNowBackoff(f func(int) time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolveNowBackoff = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// This API is EXPERIMENTAL. 
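+//
+// Example (editor's illustrative sketch, not part of the upstream gRPC sources;
+// &exampleBuilder{} stands for a user-defined resolver.Builder whose Scheme()
+// method returns "example"):
+//
+//	conn, err := grpc.Dial("example:///my-service",
+//		grpc.WithInsecure(),
+//		grpc.WithResolvers(&exampleBuilder{}),
+//	)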
+func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 0000000000..187adbb117 --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 0000000000..195e8448b6 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,122 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string + // EXPERIMENTAL: if a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. 
+} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 0000000000..66b97a6f69 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod new file mode 100644 index 0000000000..2378361302 --- /dev/null +++ b/vendor/google.golang.org/grpc/go.mod @@ -0,0 +1,16 @@ +module google.golang.org/grpc + +go 1.11 + +require ( + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 + github.com/envoyproxy/protoc-gen-validate v0.1.0 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/mock v1.1.1 + github.com/golang/protobuf v1.3.2 + github.com/google/go-cmp v0.2.0 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 +) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum new file mode 100644 index 0000000000..dd5d0cee7a --- /dev/null +++ b/vendor/google.golang.org/grpc/go.sum @@ -0,0 +1,53 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go 
v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a 
h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 0000000000..874ea6d98a --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. 
Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 0000000000..097494f710 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. 
+type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 0000000000..d493257769 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
+ Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) 
+} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh new file mode 100644 index 0000000000..7c7bcada50 --- /dev/null +++ b/vendor/google.golang.org/grpc/install_gae.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +TMP=$(mktemp -d /tmp/sdk.XXX) \ +&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ +&& unzip -q $TMP.zip -d $TMP \ +&& export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 0000000000..8b7350022a --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. 
It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 0000000000..5fc0ee3da5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. 
+// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + grpcbackoff "google.golang.org/grpc/backoff" + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 0000000000..3a905d9665 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( + "google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { + // Parse parses loads from metadata. + Parse(md metadata.MD) interface{} +} + +var parser Parser + +// SetParser sets the load parser. +// +// Not mutex-protected, should be called before any gRPC functions. +func SetParser(lr Parser) { + parser = lr +} + +// Parse calls parser.Read(). 
+func Parse(md metadata.MD) interface{} { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 0000000000..8b10516749 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implementation binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" +) + +// Logger is the global binary logger. It can be used to get binary logger for +// each method. +type Logger interface { + getMethodLogger(methodName string) *MethodLogger +} + +// binLogger is the global binary logger for the binary. One of this should be +// built at init time from the configuration (environment variable or flags). +// +// It is used to get a methodLogger for each individual method. +var binLogger Logger + +// SetLogger sets the binarg logger. +// +// Only call this at init time. +func SetLogger(l Logger) { + binLogger = l +} + +// GetMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) *MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.getMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +type methodLoggerConfig struct { + // Max length of header and message. + hdr, msg uint64 +} + +type logger struct { + all *methodLoggerConfig + services map[string]*methodLoggerConfig + methods map[string]*methodLoggerConfig + + blacklist map[string]struct{} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { + if l.all != nil { + return fmt.Errorf("conflicting global rules found") + } + l.all = ml + return nil +} + +// Set method logger for "service/*". +// +// New methodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { + if _, ok := l.services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.services == nil { + l.services = make(map[string]*methodLoggerConfig) + } + l.services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New methodLogger with same method overrides the old one. 
+func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.methods == nil { + l.methods = make(map[string]*methodLoggerConfig) + } + l.methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.blacklist == nil { + l.blacklist = make(map[string]struct{}) + } + l.blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func (l *logger) getMethodLogger(methodName string) *MethodLogger { + s, m, err := parseMethodName(methodName) + if err != nil { + grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.methods[s+"/"+m]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if _, ok := l.blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.services[s]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if l.all == nil { + return nil + } + return newMethodLogger(l.all.hdr, l.all.msg) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 0000000000..1ee00a39ac --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. 
It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 0000000000..be30d0e65e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,210 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/grpclog" +) + +// NewLoggerFromConfigString reads the string and build a logger. It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclog.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. 
+ if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". + headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. 
+ if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 0000000000..160f6e8616 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,423 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +type MethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this plugable. +} + +func newMethodLogger(h, m uint64) *MethodLogger { + return &MethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: defaultSink, // TODO(blog): make it plugable. + } +} + +// Log creates a proto binary log entry, and logs it to the sink. 
+func (ml *MethodLogger) Log(c LogEntryConfig) { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *pb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *pb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *pb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + + ml.sink.Write(m) +} + +func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.Value)) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { + toProto() *pb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. + clientHeader := &pb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &pb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. 
+ PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &pb.GrpcLogEntry_ServerHeader{ + ServerHeader: &pb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ClientMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ServerMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. +type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. 
+ PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclog.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &pb.GrpcLogEntry_Trailer{ + Trailer: &pb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *pb.Metadata { + ret := &pb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &pb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *pb.Address { + ret := &pb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = pb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = pb.Address_TYPE_IPV6 + } else { + ret.Type = pb.Address_TYPE_UNKNOWN + // Do not set address and port fields. + break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = pb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = pb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh new file mode 100644 index 0000000000..113d40cbe1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eux -o pipefail + +TMP=$(mktemp -d) + +function finish { + rm -rf "$TMP" +} +trap finish EXIT + +pushd "$TMP" +mkdir -p grpc/binarylog/grpc_binarylog_v1 +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto +popd +rm -f ./grpc_binarylog_v1/*.pb.go +cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ + diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 0000000000..a2e7c346dd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/golang/protobuf/proto" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" +) + +var ( + defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// SetDefaultSink sets the sink where binary logs will be written to. +// +// Not thread safe. Only set during initialization. +func SetDefaultSink(s Sink) { + if defaultSink != nil { + defaultSink.Close() + } + defaultSink = s +} + +// Sink writes log entry into the binary log sink. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*pb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. +func newWriterSink(w io.Writer) *writerSink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufWriteCloserSink struct { + mu sync.Mutex + closer io.Closer + out *writerSink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. 
+ + writeStartOnce sync.Once + writeTicker *time.Ticker +} + +func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { + // Start the write loop when Write is called. + fs.writeStartOnce.Do(fs.startFlushGoroutine) + fs.mu.Lock() + if err := fs.out.Write(e); err != nil { + fs.mu.Unlock() + return err + } + fs.mu.Unlock() + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufWriteCloserSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for range fs.writeTicker.C { + fs.mu.Lock() + fs.buf.Flush() + fs.mu.Unlock() + } + }() +} + +func (fs *bufWriteCloserSink) Close() error { + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + fs.mu.Lock() + fs.buf.Flush() + fs.closer.Close() + fs.out.Close() + fs.mu.Unlock() + return nil +} + +func newBufWriteCloserSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufWriteCloserSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + } +} + +// NewTempFileSink creates a temp file and returns a Sink that writes to this +// file. +func NewTempFileSink() (Sink, error) { + tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %v", err) + } + return newBufWriteCloserSink(tempFile), nil +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go new file mode 100644 index 0000000000..15dc7803d8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/util.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "strings" +) + +// parseMethodName splits service and method from the input. It expects format +// "/service/method". +// +// TODO: move to internal/grpcutil. +func parseMethodName(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 0000000000..9f6a0c1200 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) + b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +func (b *Unbounded) Get() <-chan interface{} { + return b.c +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 0000000000..f0744f9937 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,727 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. 
It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 + maxTraceEntry = defaultMaxTraceEntry +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + NewChannelzStorage() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. +type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorage initializes channelz data storage and id generator. +// +// This function returns a cleanup function to wait for all channelz state to be reset by the +// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests +// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen +// to remove some entity just register by the new test, since the id space is the same. +// +// Note: This function is exported for testing purpose only. User should not call +// it in most cases. +func NewChannelzStorage() (cleanup func() error) { + db.set(&channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + }) + idGen.reset() + return func() error { + var err error + cm := db.get() + if cm == nil { + return nil + } + for i := 0; i < 1000; i++ { + cm.mu.Lock() + if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { + cm.mu.Unlock() + // all things stored in the channelz map have been cleared. 
+ return nil + } + cm.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + cm.mu.Lock() + err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) + cm.mu.Unlock() + return err + } +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. 
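For illustration only: how the paging contract of the Get* accessors above is typically driven. Callers restart from the last id seen plus one and treat the returned bool as an end-of-list marker, which is how the channelz map code above sets it. A rough sketch, assuming the internal channelz import is available (i.e. code living inside the grpc module) and that data collection has been turned on:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	// The accessors dereference the channelz storage, so data collection
	// has to be on before querying.
	channelz.TurnOn()

	// Page through every registered top-level channel. maxResults == 0
	// falls back to EntryPerPage; the bool result is an end-of-list flag.
	id := int64(0)
	for {
		page, end := channelz.GetTopChannels(id, 0)
		for _, cm := range page {
			fmt.Printf("channel %d (%s) -> %s\n", cm.ID, cm.RefName, cm.ChannelData.Target)
			id = cm.ID + 1 // the next page starts after the last id seen
		}
		if end {
			break
		}
	}
}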
+func RegisterChannel(c Channel, pid int64, ref string) int64 { + id := idGen.genID() + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + if pid == 0 { + db.get().addChannel(id, cn, true, pid, ref) + } else { + db.get().addChannel(id, cn, false, pid, ref) + } + return id +} + +// RegisterSubChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). It returns the unique channelz tracking id assigned to this subchannel. +func RegisterSubChannel(c Channel, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a SubChannel's parent id cannot be 0") + return 0 + } + id := idGen.genID() + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + db.get().addSubChannel(id, sc, pid, ref) + return id +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +func RegisterServer(s Server, ref string) int64 { + id := idGen.genID() + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return id +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +func RegisterListenSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a ListenSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addListenSocket(id, ls, pid, ref) + return id +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. +func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a NormalSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addNormalSocket(id, ns, pid, ref) + return id +} + +// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// channelz database. +func RemoveEntry(id int64) { + db.get().removeEntry(id) +} + +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added +// to the channel trace. +// The Parent field is optional. It is used for event that will be recorded in the entity's parent +// trace also. +type TraceEventDesc struct { + Desc string + Severity Severity + Parent *TraceEventDesc +} + +// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. +func AddTraceEvent(id int64, desc *TraceEventDesc) { + if getMaxTraceEntry() == 0 { + return + } + db.get().traceEvent(id, desc) +} + +// channelMap is the storage data structure for channelz. 
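For illustration only: the registration lifecycle documented above (TurnOn, Register*, AddTraceEvent, RemoveEntry) in one short sketch. The toyChannel type and the reference names are invented; the real callers are grpc's ClientConn, addrConn and server code, and the internal import only works from inside the grpc module.

package main

import (
	"google.golang.org/grpc/internal/channelz"
)

// toyChannel stands in for a ClientConn-like object; real implementors are
// grpc's ClientConn, addrConn and the transports.
type toyChannel struct{}

func (toyChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
	return &channelz.ChannelInternalMetric{Target: "example.com:443"}
}

func main() {
	channelz.TurnOn()

	// A top-level channel has pid == 0; the subchannel hangs off it.
	chID := channelz.RegisterChannel(toyChannel{}, 0, "toy-channel")
	scID := channelz.RegisterSubChannel(toyChannel{}, chID, "toy-subchannel")

	channelz.AddTraceEvent(scID, &channelz.TraceEventDesc{
		Desc:     "subchannel created",
		Severity: channelz.CtINFO,
	})

	// Remove leaf entries before their parents; a parent lingers until its
	// last child is gone (see triggerDelete / deleteSelfIfReady below).
	channelz.RemoveEntry(scID)
	channelz.RemoveEntry(chID)
}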
+// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. +type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { + c.mu.Lock() + cn.cm = c + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { + c.mu.Lock() + sc.cm = c + sc.trace.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. +// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. 
+func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() 
+ if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. 
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 0000000000..17c2274cb3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,702 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. 
The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. + grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. 
+ // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. + CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { + // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) + EventNum int64 + // CreationTime is the creation time of the trace. + CreationTime time.Time + // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the + // oldest one) + Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. +type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
+ traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. 
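For illustration only, a hypothetical sketch of the trace-reference-count rule described above: an entry that is still referenced by another entity's trace survives RemoveEntry, and only its grpc object is swapped for a dummy. The stubChannel type and reference names are made up, and the internal import again assumes code inside the grpc module.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

type stubChannel struct{}

func (stubChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
	return &channelz.ChannelInternalMetric{}
}

func main() {
	channelz.TurnOn()

	parent := channelz.RegisterChannel(stubChannel{}, 0, "parent")
	child := channelz.RegisterSubChannel(stubChannel{}, parent, "child")

	// Mirroring the event into the parent's trace bumps the child's trace
	// reference count.
	channelz.AddTraceEvent(child, &channelz.TraceEventDesc{
		Desc:     "child created",
		Severity: channelz.CtINFO,
		Parent:   &channelz.TraceEventDesc{Desc: "created child", Severity: channelz.CtINFO},
	})

	// After removal the child is gone from the tree but still answers
	// queries, because deleteSelfFromMap refuses to drop an entry whose
	// trace is still referenced.
	channelz.RemoveEntry(child)
	fmt.Println(channelz.GetSubChannel(child) != nil) // true

	channelz.RemoveEntry(parent)
}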
+func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. 
+type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). + ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server. + LastCallStartedTimestamp time.Time +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. 
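For illustration only: what satisfying the Server interface declared just below typically looks like in practice, namely keeping cheap atomic counters and materializing a ServerInternalMetric in ChannelzMetric(). toyServer is a made-up stand-in, not grpc's real server type, and the sketch assumes it lives inside the grpc module.

package main

import (
	"sync/atomic"
	"time"

	"google.golang.org/grpc/internal/channelz"
)

// toyServer keeps atomic counters and converts them into a
// ServerInternalMetric on demand, which is all the Server interface asks for.
type toyServer struct {
	callsStarted   int64
	callsSucceeded int64
	lastCallNanos  int64
}

func (s *toyServer) ChannelzMetric() *channelz.ServerInternalMetric {
	return &channelz.ServerInternalMetric{
		CallsStarted:             atomic.LoadInt64(&s.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&s.callsSucceeded),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.lastCallNanos)),
	}
}

func main() {
	channelz.TurnOn()

	srv := &toyServer{}
	id := channelz.RegisterServer(srv, "toy-server")

	// Per-call bookkeeping a real server performs.
	atomic.AddInt64(&srv.callsStarted, 1)
	atomic.StoreInt64(&srv.lastCallNanos, time.Now().UnixNano())
	atomic.AddInt64(&srv.callsSucceeded, 1)

	_ = channelz.GetServer(id) // what the channelz service reads back
	channelz.RemoveEntry(id)
}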
+type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUNKNOWN indicates unknown severity of a trace event. + CtUNKNOWN Severity = iota + // CtINFO indicates info level severity of a trace event. + CtINFO + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 0000000000..692dd61817 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 0000000000..79edbefc43 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,44 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go new file mode 100644 index 0000000000..fdf409d55d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -0,0 +1,39 @@ +// +build linux,!appengine + +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket interface{}) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go new file mode 100644 index 0000000000..8864a08111 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -0,0 +1,26 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c interface{}) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 0000000000..ae6c8972fd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" +) + +var ( + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". + Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
+ TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false") +) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 0000000000..200b115ca2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 0000000000..fbe697c376 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. 
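For illustration only, a note on the grpcrand package vendored just above: its purpose is a single mutex-guarded source that many goroutines can share, for things like jittered retry backoff and address shuffling. The jitter helper and the 20 percent spread below are illustrative choices, not taken from this patch, and the import assumes code inside the grpc module.

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcrand"
)

// jitter spreads a base delay by +/-20 percent; the mutex-guarded helpers
// make this safe to call from many goroutines at once.
func jitter(base time.Duration) time.Duration {
	factor := 0.8 + 0.4*grpcrand.Float64() // uniform in [0.8, 1.2)
	return time.Duration(float64(base) * factor)
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jitter(time.Second))
	}
	// Picking a random index, e.g. to shuffle resolved addresses.
	fmt.Println(grpcrand.Intn(10))
}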
+func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 0000000000..0912f0bf4c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,72 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. +package internal + +import ( + "context" + "time" + + "google.golang.org/grpc/connectivity" +) + +var ( + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. + BalancerUnregister func(name string) + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second + // StatusRawProto is exported by status/status.go. This func returns a + // pointer to the wrapped Status proto for a given status.Status without a + // call to proto.Clone(). The returned Status proto should not be mutated by + // the caller. + StatusRawProto interface{} // func (*status.Status) *spb.Status + // NewRequestInfoContext creates a new context based on the argument context attaching + // the passed in RequestInfo to the new context. + NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context + // ParseServiceConfigForTesting is for creating a fake + // ClientConn for resolver testing only + ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). +// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. 
+ CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000..c368db62ea --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,441 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. "::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, authority) + } +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: customAuthorityDialler(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. 
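When a dial target carries an authority, as in dns://10.0.0.53:53/foo.example.com:443 (the addresses here are invented for illustration), customAuthorityResolver above pins every lookup to that server: parseTarget fills in port 53 if it is missing, and the returned net.Resolver uses the pure-Go resolver with a Dial function that ignores the nameserver address Go would normally pick. A standalone sketch of the equivalent configuration:

    import (
        "context"
        "net"
    )

    func resolverFor(authority string) *net.Resolver {
        return &net.Resolver{
            PreferGo: true, // required so the custom Dial below is actually used
            Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
                var d net.Dialer
                // Always dial the configured authority, e.g. "10.0.0.53:53",
                // regardless of the address the resolver passes in.
                return d.DialContext(ctx, network, authority)
            },
        }
    }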
+func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint, defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + if target.Authority == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = customAuthorityResolver(target.Authority) + if err != nil { + return nil, err + } + } + + d.wg.Add(1) + go d.watcher() + d.ResolveNow(resolver.ResolveNowOptions{}) + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.rn: + } + + state, err := d.lookup() + if err != nil { + d.cc.ReportError(err) + } else { + d.cc.UpdateState(*state) + } + + // Sleep to prevent excessive re-resolutions. Incoming resolution requests + // will be queued in d.rn. 
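ResolveNow above never blocks: the rn channel has capacity one, so any number of concurrent re-resolution requests collapse into at most one pending wake-up for the watcher goroutine, which then applies the minDNSResRate sleep before it will look again. The same idiom works anywhere a "do this again soon" signal needs coalescing; a standalone sketch, with all names invented:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        kick := make(chan struct{}, 1)

        request := func() {
            select {
            case kick <- struct{}{}: // first request arms the wake-up
            default: // later requests merge into the pending one
            }
        }

        go func() {
            for range kick {
                fmt.Println("re-resolving")
                time.Sleep(50 * time.Millisecond) // stand-in for the rate limit
            }
        }()

        for i := 0; i < 5; i++ {
            request() // five requests, far fewer actual resolutions
        }
        time.Sleep(200 * time.Millisecond)
    }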
+ t := time.NewTimer(minDNSResRate) + select { + case <-t.C: + case <-d.ctx.Done(): + t.Stop() + return + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +var filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + return err +} + +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + grpclog.Infoln(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + state := &resolver.State{ + Addresses: append(addrs, srv...), + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. 
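Taken together, lookup() above issues up to three kinds of queries. For a hypothetical target foo.example.com:443 (the zone layout below is illustrative, not part of this patch), the resolver asks for:

    A/AAAA  foo.example.com                   backend addresses, joined with port 443
    SRV     _grpclb._tcp.foo.example.com      grpclb balancer targets, only if EnableSRVLookups is true
    TXT     _grpc_config.foo.example.com      service config; the concatenated strings must start with "grpc_config="

A failed host lookup only fails the whole update if the SRV lookup produced nothing usable, and the TXT lookup is skipped entirely when the client was built with service config disabled.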
+func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go new file mode 100644 index 0000000000..8783a8cf82 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go @@ -0,0 +1,33 @@ +// +build go1.13 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
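To make the canarying logic above concrete, a TXT record value might carry the JSON array below (an invented example; DNS TXT strings are limited to 255 bytes each, which is why lookupTXT concatenates them before parsing):

    grpc_config=[{"clientLanguage":["GO"],"percentage":25,"serviceConfig":{"loadBalancingPolicy":"round_robin"}},{"serviceConfig":{}}]

canaryingSC walks the choices in order and returns the first whose constraints all pass: here roughly a quarter of Go clients (grpcrand.Intn(100)+1 <= 25) would receive the round_robin config, and everyone else falls through to the unrestricted second choice. A nil clientLanguage, clientHostName or percentage matches everything, which is why the catch-all entry can stay empty.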
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +func init() { + filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { + // The name does not exist; not an error. + return nil + } + return err + } +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 0000000000..520d9229e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 0000000000..43281a3e07 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,114 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
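The passthrough builder registered above does no resolution at all: start() hands the endpoint back to gRPC verbatim as the one and only address. A hedged usage sketch (the address and dial option are illustrative):

    import "google.golang.org/grpc"

    func dialDirect() (*grpc.ClientConn, error) {
        // "passthrough:///10.1.2.3:50051" selects the resolver above, so the
        // literal address after the scheme is dialled with no DNS involved.
        return grpc.Dial("passthrough:///10.1.2.3:50051", grpc.WithInsecure())
    }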
+ * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + grpclog.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux non-appengine environment. +type Rusage syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() (rusage *Rusage) { + rusage = new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) + return +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + f := (*syscall.Rusage)(first) + l := (*syscall.Rusage)(latest) + var ( + utimeDiffs = l.Utime.Sec - f.Utime.Sec + utimeDiffus = l.Utime.Usec - f.Utime.Usec + stimeDiffs = l.Stime.Sec - f.Stime.Sec + stimeDiffus = l.Stime.Usec - f.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 0000000000..d3fd9dab33 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,73 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
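SetTCPUserTimeout above only acts on a *net.TCPConn: it grabs the raw socket via SyscallConn and sets TCP_USER_TIMEOUT in milliseconds, which bounds how long unacknowledged data may sit on the connection before the kernel gives up on it. A minimal sketch of calling it from inside the grpc module; the address and timeout are invented, and on non-Linux builds the call is a logged no-op per syscall_nonlinux.go below:

    import (
        "net"
        "time"

        isyscall "google.golang.org/grpc/internal/syscall"
    )

    func dialWithUserTimeout(addr string, timeout time.Duration) (net.Conn, error) {
        conn, err := net.Dial("tcp", addr) // e.g. "10.0.0.7:50051"
        if err != nil {
            return nil, err
        }
        if err := isyscall.SetTCPUserTimeout(conn, timeout); err != nil {
            conn.Close()
            return nil, err
        }
        return conn, nil
    }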
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package syscall + +import ( + "net" + "sync" + "time" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +func log() { + once.Do(func() { + grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + }) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +// It always returns 0 under non-linux or appengine environment. +func GetCPUTime() int64 { + log() + return 0 +} + +// Rusage is an empty struct under non-linux or appengine environment. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux or appengine environment. +func GetRusage() (rusage *Rusage) { + log() + return nil +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. It a no-op function for non-linux or appengine environment. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + log() + return 0, 0 +} + +// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + log() + return nil +} + +// GetTCPUserTimeout is a no-op function under non-linux or appengine environments +// a negative return value indicates the operation is not supported +func GetTCPUserTimeout(conn net.Conn) (int, error) { + log() + return -1, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 0000000000..070680edba --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows will be increased + // to. TCP typically limits this to 4MB, but some systems go up to 16MB. + // Since this is only a limit, it is safe to make it optimistic. + bdpLimit = (1 << 20) * 16 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. 
+ sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 0000000000..ddee20b6be --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,926 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
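A worked pass through calculate above, with invented numbers: say the smoothed rtt is 40 ms, 1 MB arrived between the bdp ping and its ack, and the current estimate b.bdp is 1.2 MB. The bandwidth sample is 1 MB / (0.040 s * 1.5), roughly 16.7 MB/s; if that is a new maximum and the 1 MB sample is at least beta (0.66) times the 1.2 MB estimate, which it is, the estimate jumps to gamma * sample = 2 MB, capped at the 16 MB bdpLimit, and updateFlowControl is asked to widen the connection and stream windows accordingly.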
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. 
+ onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. 
+ transportResponseFrames int + trfChan atomic.Value // *chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(*chan struct{}) + if ch != nil { + select { + case <-*ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + ch := make(chan struct{}) + c.trfChan.Store(&ch) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(*chan struct{}) + close(*ch) + c.trfChan.Store((*chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + c.finish() + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. 
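The throttle/put pair above is how the transports keep a misbehaving peer from forcing unbounded memory growth: the goroutine that reads frames off the wire calls throttle() before each read, so once fifty queued items are "transport responses" (pings, settings acks, RST_STREAMs), reading stalls until loopyWriter drains back under the threshold and closes the throttling channel. A hedged sketch of that reader shape, with the frame-handling helpers invented for the example:

    for {
        cbuf.throttle()           // blocks while >= 50 response frames are queued
        frame, err := readFrame() // hypothetical: read the next HTTP/2 frame
        if err != nil {
            return
        }
        if resp := respondTo(frame); resp != nil { // hypothetical: e.g. a settings ack
            cbuf.put(resp) // queued for loopyWriter; may arm the throttle
        }
    }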
+// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resemebling to a round-robin scheduling over all streams. While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + // estdStreams is map of all established streams that are not cleaned-up yet. + // On client-side, this is all streams whose headers were sent out. + // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally have a list of data items(and perhaps trailers + // on the server-side) to be sent out. + activeStreams *outStreamList + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + infof("transport: loopyWriter.run returning. 
%v", err) + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + if err := hdr.initStream(str.id); err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
+ return nil + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. 
+ if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } + return nil +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the + // maximum possilbe HTTP2 frame size. + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() // remove the empty data item from stream + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, nil + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + var ( + idx int + buf []byte + ) + if len(dataItem.h) != 0 { // data header has not been written out yet. + buf = dataItem.h + } else { + idx = 1 + buf = dataItem.d + } + size := http2MaxFrameLen + if len(buf) < size { + size = len(buf) + } + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. 
+ str.state = waitingOnStreamQuota + return false, nil + } else if strQuota < size { + size = strQuota + } + + if l.sendQuota < uint32(size) { // connection-level flow control. + size = int(l.sendQuota) + } + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. + if dataItem.endStream && size == len(buf) { + // buf contains either data or it contains header but data is empty. + if idx == 1 || len(dataItem.d) == 0 { + endStream = true + } + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + buf = buf[size:] + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + if idx == 0 { + dataItem.h = buf + } else { + dataItem.d = buf + } + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 0000000000..9fa306b2e0 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. 
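One pass of processData above, with invented numbers: a stream has 40 KB of message body left, str.bytesOutStanding is 56 KB, l.oiws is the default 64 KB window and l.sendQuota is 12 KB. The write size starts at http2MaxFrameLen (16 KB), is cut down to the 8 KB of remaining stream quota (64 KB minus 56 KB), and the 12 KB connection quota leaves it at 8 KB. After that frame is written the stream has consumed its whole window, so processData parks it in waitingOnStreamQuota rather than re-queueing it, until an incoming window update replenishes the quota, while the connection-level sendQuota drops to 4 KB for the remaining active streams.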
+ defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 0000000000..f262edd8ec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,217 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. + replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. 
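writeQuota at the top of this file is deliberately a soft limit: get only checks that the quota is still positive before subtracting, so a single large message is always admitted and may drive the counter negative, and it is the subsequent writes on that stream that block until loopyWriter gives bytes back through replenish as it drains the stream's items. With the 64 KB defaultWriteQuota, for example, queueing a 1 MB message succeeds immediately and leaves the quota at roughly minus 960 KB; the next send on that stream then waits until about 960 KB of the message has actually gone out on the wire.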
+ pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + d := n - f.limit + f.limit = n + f.mu.Unlock() + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 0000000000..c3c32dafe9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,435 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. 
The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close() + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). 
+func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. 
+ readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 0000000000..2d6feeb1be --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1454 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). 
Must be accessed +// atomically. +var clientConnectionCounter uint64 + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + md interface{} + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + perRPCCreds []credentials.PerRPCCredentials + + kp keepalive.ClientParameters + keepaliveEnabled bool + + statsHandler stats.Handler + + initialWindowSize int32 + + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator + // onPrefaceReceipt is a callback that client transport calls upon + // receiving server preface to signal that a succefull HTTP2 + // connection was established. + onPrefaceReceipt func() + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + nextID uint32 + + mu sync.Mutex // guard the following variables + state transportState + activeStreams map[uint32]*Stream + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + + onGoAway func(GoAwayReason) + onClose func() + + bufferPool *bufferPool + + connectionID uint64 +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. 
Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + scheme = "https" + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. 
+ cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. 
Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. + if _, ok := err.(net.Error); !ok { + t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. + timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. 
+ host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, err + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. 
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + header, _, _ := metadata.FromOutgoingContextRaw(ctx) + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. 
+ // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. 
+ s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. 
+ if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.controlBuf.put(&incomingGoAway{}) + // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. + t.onGoAway(t.goAwayReason) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. + atomic.StoreUint32(&stream.unprocessed, 1) + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutext to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. 
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + state := &decodeState{} + // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. + state.data.isGRPC = !initialHeader + if err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + return + } + + isHeader := false + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: s.trailer.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.data.encoding + if len(state.data.mdata) > 0 { + s.header = state.data.mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + if !endStream { + return + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. 
+ for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + msg := t.framer.fr.ErrorDetail().Error() + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. + t.Close() + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + +// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close() + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. 
+ if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 0000000000..8b04b0392a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1253 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + // statusRawProto is a function to get to the raw status proto wrapped in a + // status.Status without a proto.Clone(). + statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. 
+ drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + done := make(chan struct{}) + t := &http2Server{ + ctx: context.Background(), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + state := &decodeState{ + serverSide: true, + } + if err := state.decodeHeader(frame); err != nil { + if se, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code()], + onWrite: func() {}, + }) + } + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.data.encoding, + method: state.data.method, + contentSubtype: state.data.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.data.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(state.data.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + } + if state.data.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + } + if state.data.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.data.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. 
+ errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + s.cancel() + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(state.data.mdata).Copy(), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. 
+func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
+ t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. + if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + errorf("transport: Got too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + return false + } + } + return true +} + +// WriteHeader sends the header metadata md back to the client. 
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + if s.updateHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + if s.sendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) + success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + onWrite: t.setResetPingStrikes, + }) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + if t.stats != nil { + // Note: WireLength is not set in outHeader. + // TODO(mmukhi): Revisit this later, if needed. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + } + t.stats.HandleRPC(s.Context(), outHeader) + } + return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. +// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + if s.getState() == streamDone { + return nil + } + s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + } + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + } + + // Attach the trailer metadata. + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + onWrite: t.setResetPingStrikes, + } + s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + // Send a RST_STREAM after the trailers if the client has not already half-closed. + rst := s.getState() == streamActive + t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + if t.stats != nil { + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // TODO(mmukhi, dfawley): Make sure this is the right code to return. + return status.Errorf(codes.Internal, "transport: %v", err) + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + // TODO(mmukhi, dfawley): Should the server write also return io.EOF? + s.cancel() + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + } + // Add some data to header frame so that we can equally distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: t.setResetPingStrikes, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + return t.controlBuf.put(df) +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. 
+ idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) + defer func() { + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() + }() + + for { + select { + case <-idleTimer.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.drain(http2.ErrCodeNo, []byte{}) + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + infof("transport: closing server transport due to maximum connection age.") + t.Close() + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + infof("transport: closing server transport due to idleness.") + t.Close() + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. + return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 0000000000..8f5f3349d9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,677 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on it's own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + baseContentType = "application/grpc" +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +type parsedHeaderData struct { + encoding string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. 
+ mdata map[string][]string + statsTags []byte + statsTrace []byte + contentSubtype string + + // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). + // + // We are in gRPC mode (peer speaking gRPC) if: + // * We are client side and have already received a HEADER frame that indicates gRPC peer. + // * The header contains valid a content-type, i.e. a string starts with "application/grpc" + // And we should handle error specific to gRPC. + // + // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we + // are in HTTP fallback mode, and should handle error specific to HTTP. + isGRPC bool + grpcErr error + httpErr error + contentTypeErr string +} + +// decodeState configures decoding criteria and records the decoded data. +type decodeState struct { + // whether decoding on server side or not + serverSide bool + + // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS + // frame once decodeHeader function has been invoked and returned. + data parsedHeaderData +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +// contentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. 
+func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} + +func (d *decodeState) status() *status.Status { + if d.data.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) + } + return d.data.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + return status.Error(codes.Internal, "peer header list size exceeded limit") + } + + for _, hf := range frame.Fields { + d.processHeaderField(hf) + } + + if d.data.isGRPC { + if d.data.grpcErr != nil { + return d.data.grpcErr + } + if d.serverSide { + return nil + } + if d.data.rawStatusCode == nil && d.data.statusGen == nil { + // gRPC status doesn't exist. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.data.rawStatusCode = &code + } + return nil + } + + // HTTP fallback mode + if d.data.httpErr != nil { + return d.data.httpErr + } + + var ( + code = codes.Internal // when header does not include HTTP status, return INTERNAL + ok bool + ) + + if d.data.httpStatus != nil { + code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] + if !ok { + code = codes.Unknown + } + } + + return status.Error(code, d.constructHTTPErrMsg()) +} + +// constructErrMsg constructs error message to be returned in HTTP fallback mode. +// Format: HTTP status code and its corresponding message + content-type error message. 
+func (d *decodeState) constructHTTPErrMsg() string { + var errMsgs []string + + if d.data.httpStatus == nil { + errMsgs = append(errMsgs, "malformed header: missing HTTP status") + } else { + errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) + } + + if d.data.contentTypeErr == "" { + errMsgs = append(errMsgs, "transport: missing content-type field") + } else { + errMsgs = append(errMsgs, d.data.contentTypeErr) + } + + return strings.Join(errMsgs, "; ") +} + +func (d *decodeState) addMetadata(k, v string) { + if d.data.mdata == nil { + d.data.mdata = make(map[string][]string) + } + d.data.mdata[k] = append(d.data.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) + return + } + d.data.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? + d.addMetadata(f.Name, f.Value) + d.data.isGRPC = true + case "grpc-encoding": + d.data.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.data.rawStatusCode = &code + case "grpc-message": + d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + d.data.statusGen = status.FromProto(s) + case "grpc-timeout": + d.data.timeoutSet = true + var err error + if d.data.timeout, err = decodeTimeout(f.Value); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.data.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) + return + } + d.data.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + return + } + d.data.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + return + } + d.data.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return + } + d.addMetadata(f.Name, v) + } +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 
'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. 
+ // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go new file mode 100644 index 0000000000..879df80c4d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/log.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. 
+// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 0000000000..a30da9eb32 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,808 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. 
+ return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last *bytes.Buffer // Stores the remaining data in the previous calls. + err error + freeBuffer func(*bytes.Buffer) +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + // Read remaining data left in last call. + copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) + r.last = nil + } + return copied, nil + } + if r.closeStream != nil { + n, r.err = r.readClient(p) + } else { + n, r.err = r.read(p) + } + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + select { + case <-r.ctxDone: + return 0, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readClient(p []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. 
+ r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, p) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer + } + return copied, nil +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ct *http2Client // nil for server side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + wq *writeQuota + + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. 
+func (s *Stream) updateHeaderSent() bool {
+ return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+ return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+ return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+ return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() {
+ if s.headerChan == nil {
+ // On the server headerChan is always nil since a stream originates
+ // only after having received headers.
+ return
+ }
+ select {
+ case <-s.ctx.Done():
+ // Close the stream to prevent headers/trailers from changing after
+ // this function returns.
+ s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+ // headerChan could possibly not be closed yet if closeStream raced
+ // with operateHeaders; wait until it is closed explicitly here.
+ <-s.headerChan
+ case <-s.headerChan:
+ }
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+ s.waitOnHeader()
+ return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(str string) {
+ s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+ return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+ if s.headerChan == nil {
+ // On server side, return the header in stream. It will be the out
+ // header after t.WriteHeader is called.
+ return s.header.Copy(), nil
+ }
+ s.waitOnHeader()
+ if !s.headerValid {
+ return nil, s.status.Err()
+ }
+ return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, returns true. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+ s.waitOnHeader()
+ return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after either
+// Read or Write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+ c := s.trailer.Copy()
+ return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+ return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+ return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+ return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+ return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.isHeaderSent() || s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+ return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+ s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ // Don't request a read if there was an error earlier.
+ if er := s.trReader.(*transportReader).er; er != nil {
+ return 0, er
+ }
+ s.requestRead(len(p))
+ return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+ reader io.Reader
+ // The handler to control the window update procedure for both this
+ // particular stream and the associated transport.
+ windowHandler func(int)
+ er error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+ n, err = t.reader.Read(p)
+ if err != nil {
+ t.er = err
+ return
+ }
+ t.windowHandler(n)
+ return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+ return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+ return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+ return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+ reachable transportState = iota
+ closing
+ draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct { + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int + ChannelzParentID int64 + MaxHeaderListSize *uint32 + HeaderTableSize *uint32 +} + +// NewServerTransport creates a ServerTransport with conn or non-nil error +// if it fails. +func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { + return newHTTP2Server(conn, config) +} + +// ConnectOptions covers all relevant options for communicating with the server. +type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(context.Context, string) (net.Conn, error) + // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. + FailOnNonTempDialError bool + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client + // connection. Only one of TransportCredentials and CredsBundle is non-nil. + TransportCredentials credentials.TransportCredentials + // CredsBundle is the credentials bundle to be used. Only one of + // TransportCredentials and CredsBundle is non-nil. + CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandler stores the handler for stats. + StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID int64 + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 +} + +// TargetInfo contains the information of the target such as network address and metadata. +type TargetInfo struct { + Addr string + Metadata interface{} + Authority string +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. 
+ Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received. + GetGoAwayReason() GoAwayReason + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. 
+ Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() error + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indiacte application + // layer of an error. + errStreamDone = errors.New("the stream is done") + // StatusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs. + statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") +) + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 +) + +// channelzData is used to store channelz related data for http2Client and http2Server. +// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. 
+type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. + lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 0000000000..34d31b5e7d --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. 
+type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 0000000000..cf6d1b9478 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,209 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "context" + "fmt" + "strings" +) + +// DecodeKeyValue returns k, v, nil. +// +// Deprecated: use k and v directly instead. +func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. 
Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var key string + for i, s := range kv { + if i%2 == 0 { + key = strings.ToLower(s) + continue + } + md[key] = append(md[key], s) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at that key. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the +// documentation of Pairs for a description of kv. 
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 0000000000..c9f79dc533 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,293 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") + + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. 
+func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. 
+func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the address resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unnecessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exists until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. 
+ ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 0000000000..f4c1c8b689 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// +// This package is deprecated: please use package resolver instead. +package naming + +// Operation defines the corresponding operations for a name resolution change. +// +// Deprecated: please use package resolver. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +// +// Deprecated: please use package resolver. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +// +// Deprecated: please use package resolver. +type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 0000000000..e01d219ffb --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. 
+func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 0000000000..00447894f0 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,229 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "io" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// v2PickerWrapper wraps a balancer.Picker while providing the +// balancer.V2Picker API. It requires a pickerWrapper to generate errors +// including the latest connectionError. To be deleted when balancer.Picker is +// updated to the balancer.V2Picker API. +type v2PickerWrapper struct { + picker balancer.Picker + connErr *connErr +} + +func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + sc, done, err := v.picker.Pick(info.Ctx, info) + if err != nil { + if err == balancer.ErrTransientFailure { + return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) + } + return balancer.PickResult{}, err + } + return balancer.PickResult{SubConn: sc, Done: done}, nil +} + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.V2Picker + + // The latest connection error. TODO: remove when V1 picker is deprecated; + // balancer should be responsible for providing the error. + *connErr +} + +type connErr struct { + mu sync.Mutex + err error +} + +func (c *connErr) updateConnectionError(err error) { + c.mu.Lock() + c.err = err + c.mu.Unlock() +} + +func (c *connErr) connectionError() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func newPickerWrapper() *pickerWrapper { + return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return + } + pw.picker = p + // pw.blockingCh should never be nil. 
+ close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ch chan struct{} + + var lastPickErr error + for { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if pw.picker == nil { + ch = pw.blockingCh + } + if ch == pw.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - has called pick on the current picker. + pw.mu.Unlock() + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else if connectionErr := pw.connectionError(); connectionErr != nil { + errStr = "latest connection error: " + connectionErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() + + pickResult, err := p.Pick(info) + + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) + } + if _, ok := status.FromError(err); ok { + return nil, nil, err + } + // err is some other error. + return nil, nil, status.Error(codes.Unknown, err.Error()) + } + + acw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, pickResult.Done), nil + } + return t, pickResult.Done, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. 
+ } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 0000000000..c43dac9ad8 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn +} + +var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + b.ResolverError(err) + return + } + b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) +} + +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. 
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, + ) + } + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + if b.sc == nil { + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if grpclog.V(2) { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, + ) + return balancer.ErrBadResolverState + } + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(cs.ResolverState.Addresses) + b.sc.Connect() + } + return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + } + if b.sc != sc { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } + return + } + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { + b.sc = nil + return + } + + switch s.ConnectivityState { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + case connectivity.Connecting: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.TransientFailure: + err := balancer.ErrTransientFailure + // TODO: this can be unconditional after the V1 API is removed, as + // SubConnState will always contain a connection error. + if s.ConnectionError != nil { + err = balancer.TransientFailureError(s.ConnectionError) + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: err}, + }) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 0000000000..76acbbcc93 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// This API is EXPERIMENTAL. +type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 0000000000..f8f69bfb70 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bufio" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + if url == nil { + return nil, errDisabled + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. 
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// newProxyDialer returns a dialer that connects to proxy first if necessary. +// The returned dialer checks if a proxy is necessary, dial to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. +func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var newAddr string + proxyURL, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + newAddr = addr + } else { + newAddr = proxyURL.Host + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) + } + return + } +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000..14aa6f20ae --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 0000000000..c8a0c3daa1 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package passthrough + +import _ "google.golang.org/grpc/internal/resolver/passthrough" // import for side effects after package was moved diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 0000000000..fe14b2fb98 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,253 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "context" + "net" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. 
If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + +// AddressType indicates the address type returned by name resolution. +// +// Deprecated: use Attributes in Address instead. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + // + // Deprecated: use Attributes in Address instead. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + // + // Deprecated: use Attributes in Address instead. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // + // If Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. + ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes + + // Type is the type of this address. + // + // Deprecated: use Attributes instead. + Type AddressType + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. + Metadata interface{} +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). 
In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. 
no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 0000000000..3eaf724cd6 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,263 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + pollingMu sync.Mutex + polling chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. 
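// Usage sketch (illustrative only): a minimal static resolver built on the Builder,
// Resolver and ClientConn interfaces above. The scheme name "static" and the fixed
// address list are assumptions for the example; a real resolver would watch an external
// source and call UpdateState whenever the address set changes.
package staticresolver

import "google.golang.org/grpc/resolver"

type builder struct{ addrs []string }

func (b *builder) Scheme() string { return "static" }

func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// Push the fixed address list to the ClientConn once at build time.
	addrs := make([]resolver.Address, 0, len(b.addrs))
	for _, a := range b.addrs {
		addrs = append(addrs, resolver.Address{Addr: a})
	}
	cc.UpdateState(resolver.State{Addresses: addrs})
	return nopResolver{}, nil
}

// nopResolver has nothing to re-resolve and nothing to clean up.
type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() {
	// Per the NOTE on Register above, registration must happen during package initialization.
	// A client would then dial a target such as "static:///ignored".
	resolver.Register(&builder{addrs: []string{"10.0.0.1:50051", "10.0.0.2:50051"}})
}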
+func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. +func parseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +// poll begins or ends asynchronous polling of the resolver based on whether +// err is ErrBadResolverState. +func (ccr *ccResolverWrapper) poll(err error) { + ccr.pollingMu.Lock() + defer ccr.pollingMu.Unlock() + if err != balancer.ErrBadResolverState { + // stop polling + if ccr.polling != nil { + close(ccr.polling) + ccr.polling = nil + } + return + } + if ccr.polling != nil { + // already polling + return + } + p := make(chan struct{}) + ccr.polling = p + go func() { + for i := 0; ; i++ { + ccr.resolveNow(resolver.ResolveNowOptions{}) + t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) + select { + case <-p: + t.Stop() + return + case <-ccr.done.Done(): + // Resolver has been closed. + t.Stop() + return + case <-t.C: + select { + case <-p: + return + default: + } + // Timer expired; re-resolve. 
+ } + } + }() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { + if ccr.done.HasFired() { + return + } + grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.curState = s + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + if ccr.done.HasFired() { + return + } + grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err) + if channelz.IsOn() { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver reported error: %v", err), + Severity: channelz.CtWarning, + }) + } + ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + if ccr.done.HasFired() { + return + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + if ccr.done.HasFired() { + return + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + if ccr.cc.dopts.disableServiceConfig { + grpclog.Infof("Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err) + if channelz.IsOn() { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err), + Severity: channelz.CtWarning, + }) + } + ccr.poll(balancer.ErrBadResolverState) + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + } + ccr.curState.ServiceConfig = scpr + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtINFO, + }) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 0000000000..d3a4adc5ee --- /dev/null +++ 
b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,887 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "net/url" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. 
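// Usage sketch (illustrative only): the deprecated Compressor/Decompressor types above
// are wired in through the WithCompressor and WithDecompressor dial options. The target
// address is a placeholder; new code would normally prefer the registered encoding/gzip
// compressor together with the UseCompressor call option shown further below.
package gzipdial

import "google.golang.org/grpc"

func dialWithLegacyGzip() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"example.com:50051", // placeholder target
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
	)
}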
+func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + stream ClientStream + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo) { + if c.stream != nil { + *o.HeaderAddr, _ = c.stream.Header() + } +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo) { + if c.stream != nil { + *o.TrailerAddr = c.stream.Trailer() + } +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. 
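// Usage sketch (illustrative only): the Header, Trailer and Peer call options above
// capture per-RPC metadata and peer identity. The method name and the req/reply values
// are hypothetical; in practice they would be generated protobuf messages.
package metadataopts

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

func callWithMetadata(ctx context.Context, cc *grpc.ClientConn, req, reply interface{}) error {
	var header, trailer metadata.MD
	var p peer.Peer
	// header, trailer and p are only populated after Invoke returns.
	return cc.Invoke(ctx, "/example.Service/Method", req, reply,
		grpc.Header(&header),
		grpc.Trailer(&trailer),
		grpc.Peer(&p),
	)
}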
+type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo) { + if c.stream != nil { + if x, ok := peer.FromContext(c.stream.Context()); ok { + *o.PeerAddr = *x + } + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// This is an EXPERIMENTAL API. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can receive. +// This is an EXPERIMENTAL API. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can send. +// This is an EXPERIMENTAL API. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// This API is EXPERIMENTAL. 
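// Usage sketch (illustrative only): combining several of the call options documented
// above on one RPC. The "gzip" compressor name comes from the encoding/gzip package,
// which registers itself via the blank import; the method name and message values are
// hypothetical placeholders.
package calloptions

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func robustCall(ctx context.Context, cc *grpc.ClientConn, req, reply interface{}) error {
	return cc.Invoke(ctx, "/example.Service/Method", req, reply,
		grpc.WaitForReady(true),         // block until a connection is available instead of failing fast
		grpc.MaxCallRecvMsgSize(16<<20), // allow responses up to 16 MiB
		grpc.UseCompressor("gzip"),      // compress the request with the registered gzip compressor
	)
}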
+func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// ForceCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// This is an EXPERIMENTAL API. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. 
+type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// This API is EXPERIMENTAL. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// This is an EXPERIMENTAL API. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. 
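// Worked sketch (illustrative only): the wire framing that the parser above reads is a
// 5-byte prefix per message, one compressed-flag byte followed by a 4-byte big-endian
// payload length, then the payload itself (the same layout msgHeader produces below).
// The helper name "frame" is an assumption for the example.
package framing

import "encoding/binary"

// frame prepends the 5-byte gRPC message header to an already-encoded payload.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5, 5+len(payload))
	if compressed {
		hdr[0] = 1 // compressionMade
	} // otherwise hdr[0] stays 0 (compressionNone)
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}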
+func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. 
+ if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) + } else { + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + size = len(d) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + return d, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. 
+// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. +func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. 
The latest +// support package version is 6. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 0000000000..0d75cb109a --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1548 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +var statusOK = status.New(codes.OK, "") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. 
+type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + m map[string]*service // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// This API is EXPERIMENTAL. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. 
+func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < time.Second { + grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = time.Second + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. 
+func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. 
+// +// This API is EXPERIMENTAL. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + m: make(map[string]*service), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. 
+func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. 
+ s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. 
+// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain() + } + s.conns[st] = true + return true +} + +func (s *Server) removeConn(st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, st) + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. + defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. 
+ // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerMessage{ + Message: reply, + }) + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + err = t.WriteStatus(stream, statusOK) + if binlog != nil { + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + sh := s.opts.statsHandler + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + if sh != nil || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. 
Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + appStatus = status.New(codes.Unknown, appErr.Error()) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + err = t.WriteStatus(ss.s, statusOK) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + srv, knownService := s.m[service] + if knownService { + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + } + // Unknown service, or known server unknown method. 
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// This API is EXPERIMENTAL. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// This API is EXPERIMENTAL. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// This API is EXPERIMENTAL. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for st := range s.conns { + st.Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. 
Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 0000000000..5a80a575a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,434 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + retryPolicy *retryPolicy +} + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. The balancer + // specified via grpc.WithBalancer will override this. This is deprecated; + // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig + // will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. 
+ // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. + rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +// retryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + maxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + initialBackoff time.Duration + maxBackoff time.Duration + backoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + retryableStatusCodes map[codes.Code]bool +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
+ hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +type loadBalancingConfig map[string]json.RawMessage + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *[]loadBalancingConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfigForTesting = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if rsc.LoadBalancingConfig != nil { + for i, lbcfg := range *rsc.LoadBalancingConfig { + if len(lbcfg) != 1 { + err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + grpclog.Warningf(err.Error()) + return &serviceconfig.ParseResult{Err: err} + } + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range lbcfg { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + sc.lbConfig = &lbConfig{name: name} + if parser, ok := builder.(balancer.ConfigParser); ok { + var err error + sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} + } + } else if string(jsonCfg) != "{}" { + grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + break + } + if sc.lbConfig == nil { + // We had a loadBalancingConfig field but did not encounter a + // supported policy. The config is considered invalid in this + // case. 
+ err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + grpclog.Warningf(err.Error()) + return &serviceconfig.ParseResult{Err: err} + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &retryPolicy{ + maxAttempts: jrp.MaxAttempts, + initialBackoff: *ib, + maxBackoff: *mb, + backoffMultiplier: jrp.BackoffMultiplier, + retryableStatusCodes: make(map[codes.Code]bool), + } + if rp.maxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.maxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.retryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 0000000000..187c304421 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// This package is EXPERIMENTAL. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. +type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 0000000000..dc03731e45 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. 
+ // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 0000000000..9e22c393f1 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,311 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. 
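// Illustrative sketch (not part of the vendored files): a minimal stats.Handler
// that logs RPC begin/end events, attached on the client side with
// grpc.WithStatsHandler. The dial target is a placeholder.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type loggingHandler struct{}

func (loggingHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return ctx // values attached here are visible to later HandleRPC calls for this RPC
}

func (loggingHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	switch ev := s.(type) {
	case *stats.Begin:
		log.Printf("RPC begin (client=%v, failfast=%v)", ev.Client, ev.FailFast)
	case *stats.End:
		log.Printf("RPC end after %v, err=%v", ev.EndTime.Sub(ev.BeginTime), ev.Error)
	}
}

func (loggingHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return ctx
}

func (loggingHandler) HandleConn(ctx context.Context, s stats.ConnStats) {}

func main() {
	cc, err := grpc.Dial("example.com:443", grpc.WithInsecure(),
		grpc.WithStatsHandler(loggingHandler{}))
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}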
+ LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. 
It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. 
+// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 0000000000..a1348e9b16 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,228 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" +) + +func init() { + internal.StatusRawProto = statusRawProto +} + +func statusRawProto(s *Status) *spb.Status { return s.s } + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) GRPCStatus() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Is implements future error.Is functionality. +// A statusError is equivalent if the code and message are identical. +func (se *statusError) Is(target error) bool { + tse, ok := target.(*statusError) + if !ok { + return false + } + + return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) +} + +// Status represents an RPC status code, message, and details. 
It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. 
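// Illustrative sketch (not part of the vendored files): typical use of the
// status package. A handler returns an error built with status.Errorf, and a
// caller inspects it with FromError/Convert/Code instead of comparing strings.
// The lookup function and its argument are placeholders.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func lookup(id string) error {
	// What a gRPC handler would return for a missing resource.
	return status.Errorf(codes.NotFound, "no entry for %q", id)
}

func main() {
	err := lookup("abc")

	// FromError reports whether err carries a gRPC status.
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message())
	}

	// Convert is the same, minus the bool; non-status errors map to codes.Unknown.
	_ = status.Convert(err).Code()

	// status.Code is the shortcut when only the code matters.
	if status.Code(err) == codes.NotFound {
		fmt.Println("not found")
	}
}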
+func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return nil + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 0000000000..bb99940e36 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1529 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. 
+type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. 
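// Illustrative sketch (not part of the vendored files) of the cleanup rules
// listed above, calling NewStream directly. The method name
// "/example.Service/Watch" and the use of empty.Empty as the message type are
// placeholders; generated code normally wraps this. The context is cancelled
// and RecvMsg is called until a non-nil error, so neither a goroutine nor a
// context is leaked.
package main

import (
	"context"
	"io"
	"log"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"
)

var watchDesc = &grpc.StreamDesc{StreamName: "Watch", ServerStreams: true}

func watch(cc *grpc.ClientConn) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // rule 2: cancelling the context always releases the stream

	stream, err := cc.NewStream(ctx, watchDesc, "/example.Service/Watch")
	if err != nil {
		return err
	}
	if err := stream.SendMsg(&empty.Empty{}); err != nil {
		return err
	}
	if err := stream.CloseSend(); err != nil {
		return err
	}
	for {
		m := new(empty.Empty)
		err := stream.RecvMsg(m) // rule 3: receive until a non-nil error
		if err == io.EOF {
			return nil // clean end of stream
		}
		if err != nil {
			return err // status error from the server or transport
		}
		log.Println("received one message")
	}
}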
+// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + c := defaultCallInfo() + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. 
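// Illustrative sketch (not part of the vendored files) of the compression path
// described in the comment above: importing the gzip encoding package registers
// a Compressor named "gzip", and the UseCompressor call option selects it, so
// callHdr.SendCompress is set to "gzip" for those RPCs. The target is a placeholder.
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func dial() (*grpc.ClientConn, error) {
	return grpc.Dial("example.com:443",
		grpc.WithInsecure(),
		// Apply the compressor to every call on this connection by default;
		// grpc.UseCompressor can equally be passed on individual calls.
		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}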
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + cs.binlog = binarylog.GetMethodLogger(method) + + cs.callInfo.stream = cs + // Only this initial attempt has stats/tracing. + // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. + if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if cs.binlog != nil { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + cs.binlog.Log(logEntry) + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new attempt with a transport. +// If it succeeds, then it replaces clientStream's attempt with this new attempt. +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { + newAttempt := &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + defer func() { + if retErr != nil { + // This attempt is not set in the clientStream, so it's finish won't + // be called. Call it here for stats and trace in case they are not + // nil. 
+ newAttempt.finish(retErr) + } + }() + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + if trInfo != nil { + trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + } + newAttempt.t = t + newAttempt.done = done + cs.attempt = newAttempt + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlog *binarylog.MethodLogger // Binary logger, can be nil. + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. 
+ trInfo *traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. +func (cs *clientStream) shouldRetry(err error) error { + if cs.attempt.s == nil && !cs.callInfo.failFast { + // In the event of any error from NewStream (attempt.s == nil), we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + } + if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, stream unprocessed: transparently retry. + cs.firstAttempt = false + return nil + } + cs.firstAttempt = false + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if !cs.attempt.s.TrailersOnly() { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + hasPushback = true + } else if len(sps) > 1 { + grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.retryPolicy + if rp == nil || !rp.retryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.maxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.initialBackoff) * fact + if max := float64(rp.maxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. 
+func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(lastErr) + if err := cs.shouldRetry(lastErr); err != nil { + cs.commitAttemptLocked() + return err + } + if err := cs.newAttemptLocked(nil, nil); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + return cs.attempt.s.Context() +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + return op(cs.attempt) + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. + continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + if err != nil { + cs.finish(err) + return nil, err + } + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Only log if binary log is on and header has not been logged. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + cs.serverHeaderBinlogged = true + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. + cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) 
+ cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + msgBytes := data // Store the pointer before setting to nil. For binary logging. + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ClientMessage{ + OnClientSide: true, + Message: msgBytes, + }) + } + return +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if cs.binlog != nil { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + }) + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + + if cs.binlog != nil { + // finish will not log Trailer. Log Trailer here. + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if logEntry.Err == io.EOF { + logEntry.Err = nil + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + } + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if cs.binlog != nil { + cs.binlog.Log(&binarylog.ClientHalfClose{ + OnClientSide: true, + }) + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.commitAttemptLocked() + cs.mu.Unlock() + // For binary logging. only log cancel in finish (could be caused by RPC ctx + // canceled or ClientConn closed). Trailer will be logged in RecvMsg. + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. 
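// Illustrative sketch (not part of the vendored files): the limits enforced by
// the SendMsg/RecvMsg checks above come from the method config and from call
// options, and the effective value is the smaller of the two (see getMaxSize in
// service_config.go). A client can raise its own side as shown; the target and
// the 16 MiB figure are placeholders.
package main

import "google.golang.org/grpc"

func dial() (*grpc.ClientConn, error) {
	const sixteenMiB = 16 * 1024 * 1024
	return grpc.Dial("example.com:443",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallSendMsgSize(sixteenMiB), // payloads above this fail with ResourceExhausted
			grpc.MaxCallRecvMsgSize(sixteenMiB),
		))
}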
+ if cs.binlog != nil && status.Code(err) == codes.Canceled { + cs.binlog.Log(&binarylog.Cancel{ + OnClientSide: true, + }) + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo) + } + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs + if a.statsHandler != nil && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return a.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (a *csAttempt) finish(err error) { + a.mu.Lock() + if a.finished { + a.mu.Unlock() + return + } + a.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + var tr metadata.MD + if a.s != nil { + a.t.CloseStream(a.s, err) + tr = a.s.Trailer() + } + + if a.done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } + a.done(balancer.DoneInfo{ + Err: err, + Trailer: tr, + BytesSent: a.s != nil, + BytesReceived: br, + ServerLoad: balancerload.Parse(tr), + }) + } + if a.statsHandler != nil { + end := &stats.End{ + Client: true, + BeginTime: a.cs.beginTime, + EndTime: time.Now(), + Trailer: tr, + Error: err, + } + a.statsHandler.HandleRPC(a.cs.ctx, end) + } + if a.trInfo != nil && a.trInfo.tr != nil { + if err == nil { + a.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + a.trInfo.tr.LazyPrintf("RPC: [%v]", err) + a.trInfo.tr.SetError() + } + a.trInfo.tr.Finish() + a.trInfo.tr = nil + } + a.mu.Unlock() +} + +// newClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid race, transport is specified separately, instead +// of using ac.transpot. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { + if t == nil { + // TODO: return RPC error here? + return nil, errors.New("transport provided is nil") + } + // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. + c := &callInfo{} + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: ac.cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. 
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + as.callInfo.stream = as + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) 
+ as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + + if channelz.IsOn() { + as.t.IncrMsgSent() + } + return nil +} + +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + as.finish(err) + } + }() + + if !as.decompSet { + // Block until we receive headers containing received message encoding. + if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.dc == nil || as.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + as.dc = nil + as.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + as.dc = nil + } + // Only initialize this state once per stream. + as.decompSet = true + } + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { + if err == io.EOF { + if statusErr := as.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + + if channelz.IsOn() { + as.t.IncrMsgRecv() + } + if as.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return as.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (as *addrConnStream) finish(err error) { + as.mu.Lock() + if as.finished { + as.mu.Unlock() + return + } + as.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + if as.s != nil { + as.t.CloseStream(as.s, err) + } + + if err != nil { + as.ac.incrCallsFailed() + } else { + as.ac.incrCallsSucceeded() + } + as.cancel() + as.mu.Unlock() +} + +// ServerStream defines the server-side behavior of a streaming RPC. +// +// All errors returned from ServerStream methods are compatible with the +// status package. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. 
+ // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + binlog *binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := ss.t.WriteHeader(ss.s, md) + if ss.binlog != nil && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if ss.binlog != nil { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + ss.binlog.Log(&binarylog.ServerMessage{ + Message: data, + }) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. 
+ } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if ss.statsHandler != nil || ss.binlog != nil { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientHalfClose{}) + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + }) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 0000000000..584360f681 --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. 
+// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 0000000000..07a2d26b3e --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. 
a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 0000000000..1a831b159a --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.27.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 0000000000..0e73707278 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,159 @@ +#!/bin/bash + +if [[ `uname -a` = *"Darwin"* ]]; then + echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" + exit 1 +fi + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +fail_on_output() { + tee /dev/stderr | (! read) +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" + +if [[ "$1" = "-install" ]]; then + # Check for module support + if go help mod >& /dev/null; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + popd + else + # Ye olde `go get` incantation. + # Note: this gets the latest version of all tools (vs. the pinned versions + # with Go modules). + go get -u \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + fi + if [[ -z "${VET_SKIP_PROTO}" ]]; then + if [[ "${TRAVIS}" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! 
which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# - Ensure all source files contain a copyright message. +(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go') + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +(! grep 'func Test[^(]' *_test.go) +(! grep 'func Test[^(]' test/*.go) + +# - Do not import x/net/context. +(! git grep -l 'x/net/context' -- "*.go") + +# - Do not import math/rand for real library code. Use internal/grpcrand for +# thread safety. +git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test') + +# - Ensure all ptypes proto packages are renamed when importing. +(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go") + +# - Check imports that are illegal in appengine (until Go 1.11). +# TODO: Remove when we drop Go 1.10 support +go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go + +# - gofmt, goimports, golint (with exceptions for generated code), go vet. +gofmt -s -d -l . 2>&1 | fail_on_output +goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") +golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") +go vet -all . + +misspell -error . + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - Check that our module is tidy. +if go help mod >& /dev/null; then + go mod tidy && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - Collection of static analysis checks +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. +SC_OUT="$(mktemp)" +staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +(! grep -v "is deprecated:.*SA1019" "${SC_OUT}") +# Only ignore the following deprecated types/fields/functions. +(! 
grep -Fv '.HandleResolvedAddrs +.HandleSubConnStateChange +.HeaderMap +.NewAddress +.NewServiceConfig +.Metadata is deprecated: use Attributes +.Type is deprecated: use Attributes +.UpdateBalancerState +balancer.Picker +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.RoundRobin +grpc.ServiceConfig +grpc.WithBalancer +grpc.WithBalancerName +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +naming.Resolver +naming.Update +naming.Watcher +resolver.Backend +resolver.GRPCLB' "${SC_OUT}" +) diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore new file mode 100644 index 0000000000..836562412f --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml new file mode 100644 index 0000000000..65dcbc56dc --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.8 + - 1.7 + - 1.6 \ No newline at end of file diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE new file mode 100644 index 0000000000..c3d4cc307d --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md new file mode 100644 index 0000000000..060eae52a2 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md @@ -0,0 +1,179 @@ +# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0) + +### Lumberjack is a Go package for writing logs to rolling files. + +Package lumberjack provides a rolling logger. + +Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +thusly: + + import "gopkg.in/natefinch/lumberjack.v2" + +The package name remains simply lumberjack, and the code resides at +https://github.com/natefinch/lumberjack under the v2.0 branch. + +Lumberjack is intended to be one part of a logging infrastructure. +It is not an all-in-one solution, but instead is a pluggable +component at the bottom of the logging stack that simply controls the files +to which logs are written. + +Lumberjack plays well with any logging package that can write to an +io.Writer, including the standard library's log package. + +Lumberjack assumes that only one process is writing to the output files. +Using the same lumberjack configuration from multiple processes on the same +machine will result in improper behavior. + + +**Example** + +To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts. + +Code: + +```go +log.SetOutput(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, //days + Compress: true, // disabled by default +}) +``` + + + +## type Logger +``` go +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. 
+ Compress bool `json:"compress" yaml:"compress"` + // contains filtered or unexported fields +} +``` +Logger is an io.WriteCloser that writes to the specified filename. + +Logger opens or creates the logfile on first Write. If the file exists and +is less than MaxSize megabytes, lumberjack will open and append to that file. +If the file exists and its size is >= MaxSize megabytes, the file is renamed +by putting the current time in a timestamp in the name immediately before the +file's extension (or the end of the filename if there's no extension). A new +log file is then created using original filename. + +Whenever a write would cause the current log file exceed MaxSize megabytes, +the current file is closed, renamed, and a new log file created with the +original name. Thus, the filename you give Logger is always the "current" log +file. + +Backups use the log file name given to Logger, in the form `name-timestamp.ext` +where name is the filename without the extension, timestamp is the time at which +the log was rotated formatted with the time.Time format of +`2006-01-02T15-04-05.000` and the extension is the original extension. For +example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created +at 6:30pm on Nov 11 2016 would use the filename +`/var/log/foo/server-2016-11-04T18-30-00.000.log` + +### Cleaning Up Old Log Files +Whenever a new logfile gets created, old log files may be deleted. The most +recent files according to the encoded timestamp will be retained, up to a +number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +with an encoded timestamp older than MaxAge days are deleted, regardless of +MaxBackups. Note that the time encoded in the timestamp is the rotation +time, which may differ from the last time that file was written to. + +If MaxBackups and MaxAge are both 0, no old log files will be deleted. + + + + + + + + + + + +### func (\*Logger) Close +``` go +func (l *Logger) Close() error +``` +Close implements io.Closer, and closes the current logfile. + + + +### func (\*Logger) Rotate +``` go +func (l *Logger) Rotate() error +``` +Rotate causes Logger to close the existing log file and immediately create a +new one. This is a helper function for applications that want to initiate +rotations outside of the normal rotation rules, such as in response to +SIGHUP. After rotating, this initiates a cleanup of old log files according +to the normal rules. + +**Example** + +Example of how to rotate in response to SIGHUP. + +Code: + +```go +l := &lumberjack.Logger{} +log.SetOutput(l) +c := make(chan os.Signal, 1) +signal.Notify(c, syscall.SIGHUP) + +go func() { + for { + <-c + l.Rotate() + } +}() +``` + +### func (\*Logger) Write +``` go +func (l *Logger) Write(p []byte) (n int, err error) +``` +Write implements io.Writer. If a write would cause the log file to be larger +than MaxSize, the file is closed, renamed to include a timestamp of the +current time, and a new log file is created using the original log file name. +If the length of the write is greater than MaxSize, an error is returned. 
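**Example**

A minimal sketch of the backup-naming scheme described above. The `backupName` helper below is illustrative only — it mirrors lumberjack's internal logic rather than an exported API — but the `2006-01-02T15-04-05.000` layout and the resulting filename come straight from the documentation above.

Code:

```go
package main

import (
	"fmt"
	"path/filepath"
	"time"
)

// backupTimeFormat is the reference-time layout lumberjack uses for rotated files.
const backupTimeFormat = "2006-01-02T15-04-05.000"

// backupName derives a rotated filename in the documented form name-timestamp.ext:
// the timestamp is inserted between the base name and the extension.
func backupName(name string, t time.Time) string {
	dir := filepath.Dir(name)
	base := filepath.Base(name)
	ext := filepath.Ext(base)
	prefix := base[:len(base)-len(ext)]
	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, t.UTC().Format(backupTimeFormat), ext))
}

func main() {
	t := time.Date(2016, time.November, 4, 18, 30, 0, 0, time.UTC)
	fmt.Println(backupName("/var/log/foo/server.log", t))
	// Prints: /var/log/foo/server-2016-11-04T18-30-00.000.log
}
```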
+ + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go new file mode 100644 index 0000000000..11d0669723 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go @@ -0,0 +1,11 @@ +// +build !linux + +package lumberjack + +import ( + "os" +) + +func chown(_ string, _ os.FileInfo) error { + return nil +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go new file mode 100644 index 0000000000..2758ec9ced --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go @@ -0,0 +1,19 @@ +package lumberjack + +import ( + "os" + "syscall" +) + +// os_Chown is a var so we can mock it out during tests. +var os_Chown = os.Chown + +func chown(name string, info os.FileInfo) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + if err != nil { + return err + } + f.Close() + stat := info.Sys().(*syscall.Stat_t) + return os_Chown(name, int(stat.Uid), int(stat.Gid)) +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go new file mode 100644 index 0000000000..46d97c5531 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go @@ -0,0 +1,541 @@ +// Package lumberjack provides a rolling logger. +// +// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +// thusly: +// +// import "gopkg.in/natefinch/lumberjack.v2" +// +// The package name remains simply lumberjack, and the code resides at +// https://github.com/natefinch/lumberjack under the v2.0 branch. +// +// Lumberjack is intended to be one part of a logging infrastructure. +// It is not an all-in-one solution, but instead is a pluggable +// component at the bottom of the logging stack that simply controls the files +// to which logs are written. +// +// Lumberjack plays well with any logging package that can write to an +// io.Writer, including the standard library's log package. +// +// Lumberjack assumes that only one process is writing to the output files. +// Using the same lumberjack configuration from multiple processes on the same +// machine will result in improper behavior. +package lumberjack + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +const ( + backupTimeFormat = "2006-01-02T15-04-05.000" + compressSuffix = ".gz" + defaultMaxSize = 100 +) + +// ensure we always implement io.WriteCloser +var _ io.WriteCloser = (*Logger)(nil) + +// Logger is an io.WriteCloser that writes to the specified filename. +// +// Logger opens or creates the logfile on first Write. If the file exists and +// is less than MaxSize megabytes, lumberjack will open and append to that file. +// If the file exists and its size is >= MaxSize megabytes, the file is renamed +// by putting the current time in a timestamp in the name immediately before the +// file's extension (or the end of the filename if there's no extension). A new +// log file is then created using original filename. +// +// Whenever a write would cause the current log file exceed MaxSize megabytes, +// the current file is closed, renamed, and a new log file created with the +// original name. Thus, the filename you give Logger is always the "current" log +// file. 
+// +// Backups use the log file name given to Logger, in the form +// `name-timestamp.ext` where name is the filename without the extension, +// timestamp is the time at which the log was rotated formatted with the +// time.Time format of `2006-01-02T15-04-05.000` and the extension is the +// original extension. For example, if your Logger.Filename is +// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would +// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log` +// +// Cleaning Up Old Log Files +// +// Whenever a new logfile gets created, old log files may be deleted. The most +// recent files according to the encoded timestamp will be retained, up to a +// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +// with an encoded timestamp older than MaxAge days are deleted, regardless of +// MaxBackups. Note that the time encoded in the timestamp is the rotation +// time, which may differ from the last time that file was written to. +// +// If MaxBackups and MaxAge are both 0, no old log files will be deleted. +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool `json:"compress" yaml:"compress"` + + size int64 + file *os.File + mu sync.Mutex + + millCh chan bool + startMill sync.Once +} + +var ( + // currentTime exists so it can be mocked out by tests. + currentTime = time.Now + + // os_Stat exists so it can be mocked out by tests. + os_Stat = os.Stat + + // megabyte is the conversion factor between MaxSize and bytes. It is a + // variable so tests can mock it out and not need to write megabytes of data + // to disk. + megabyte = 1024 * 1024 +) + +// Write implements io.Writer. If a write would cause the log file to be larger +// than MaxSize, the file is closed, renamed to include a timestamp of the +// current time, and a new log file is created using the original log file name. +// If the length of the write is greater than MaxSize, an error is returned. 
+func (l *Logger) Write(p []byte) (n int, err error) { + l.mu.Lock() + defer l.mu.Unlock() + + writeLen := int64(len(p)) + if writeLen > l.max() { + return 0, fmt.Errorf( + "write length %d exceeds maximum file size %d", writeLen, l.max(), + ) + } + + if l.file == nil { + if err = l.openExistingOrNew(len(p)); err != nil { + return 0, err + } + } + + if l.size+writeLen > l.max() { + if err := l.rotate(); err != nil { + return 0, err + } + } + + n, err = l.file.Write(p) + l.size += int64(n) + + return n, err +} + +// Close implements io.Closer, and closes the current logfile. +func (l *Logger) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.close() +} + +// close closes the file if it is open. +func (l *Logger) close() error { + if l.file == nil { + return nil + } + err := l.file.Close() + l.file = nil + return err +} + +// Rotate causes Logger to close the existing log file and immediately create a +// new one. This is a helper function for applications that want to initiate +// rotations outside of the normal rotation rules, such as in response to +// SIGHUP. After rotating, this initiates compression and removal of old log +// files according to the configuration. +func (l *Logger) Rotate() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.rotate() +} + +// rotate closes the current file, moves it aside with a timestamp in the name, +// (if it exists), opens a new file with the original filename, and then runs +// post-rotation processing and removal. +func (l *Logger) rotate() error { + if err := l.close(); err != nil { + return err + } + if err := l.openNew(); err != nil { + return err + } + l.mill() + return nil +} + +// openNew opens a new log file for writing, moving any old log file out of the +// way. This methods assumes the file has already been closed. +func (l *Logger) openNew() error { + err := os.MkdirAll(l.dir(), 0744) + if err != nil { + return fmt.Errorf("can't make directories for new logfile: %s", err) + } + + name := l.filename() + mode := os.FileMode(0644) + info, err := os_Stat(name) + if err == nil { + // Copy the mode off the old logfile. + mode = info.Mode() + // move the existing file + newname := backupName(name, l.LocalTime) + if err := os.Rename(name, newname); err != nil { + return fmt.Errorf("can't rename log file: %s", err) + } + + // this is a no-op anywhere but linux + if err := chown(name, info); err != nil { + return err + } + } + + // we use truncate here because this should only get called when we've moved + // the file ourselves. if someone else creates the file in the meantime, + // just wipe out the contents. + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) + if err != nil { + return fmt.Errorf("can't open new logfile: %s", err) + } + l.file = f + l.size = 0 + return nil +} + +// backupName creates a new filename from the given name, inserting a timestamp +// between the filename and the extension, using the local time if requested +// (otherwise UTC). +func backupName(name string, local bool) string { + dir := filepath.Dir(name) + filename := filepath.Base(name) + ext := filepath.Ext(filename) + prefix := filename[:len(filename)-len(ext)] + t := currentTime() + if !local { + t = t.UTC() + } + + timestamp := t.Format(backupTimeFormat) + return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext)) +} + +// openExistingOrNew opens the logfile if it exists and if the current write +// would not put it over MaxSize. 
If there is no such file or the write would +// put it over the MaxSize, a new file is created. +func (l *Logger) openExistingOrNew(writeLen int) error { + l.mill() + + filename := l.filename() + info, err := os_Stat(filename) + if os.IsNotExist(err) { + return l.openNew() + } + if err != nil { + return fmt.Errorf("error getting log file info: %s", err) + } + + if info.Size()+int64(writeLen) >= l.max() { + return l.rotate() + } + + file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + // if we fail to open the old log file for some reason, just ignore + // it and open a new log file. + return l.openNew() + } + l.file = file + l.size = info.Size() + return nil +} + +// genFilename generates the name of the logfile from the current time. +func (l *Logger) filename() string { + if l.Filename != "" { + return l.Filename + } + name := filepath.Base(os.Args[0]) + "-lumberjack.log" + return filepath.Join(os.TempDir(), name) +} + +// millRunOnce performs compression and removal of stale log files. +// Log files are compressed if enabled via configuration and old log +// files are removed, keeping at most l.MaxBackups files, as long as +// none of them are older than MaxAge. +func (l *Logger) millRunOnce() error { + if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress { + return nil + } + + files, err := l.oldLogFiles() + if err != nil { + return err + } + + var compress, remove []logInfo + + if l.MaxBackups > 0 && l.MaxBackups < len(files) { + preserved := make(map[string]bool) + var remaining []logInfo + for _, f := range files { + // Only count the uncompressed log file or the + // compressed log file, not both. + fn := f.Name() + if strings.HasSuffix(fn, compressSuffix) { + fn = fn[:len(fn)-len(compressSuffix)] + } + preserved[fn] = true + + if len(preserved) > l.MaxBackups { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + if l.MaxAge > 0 { + diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge)) + cutoff := currentTime().Add(-1 * diff) + + var remaining []logInfo + for _, f := range files { + if f.timestamp.Before(cutoff) { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + + if l.Compress { + for _, f := range files { + if !strings.HasSuffix(f.Name(), compressSuffix) { + compress = append(compress, f) + } + } + } + + for _, f := range remove { + errRemove := os.Remove(filepath.Join(l.dir(), f.Name())) + if err == nil && errRemove != nil { + err = errRemove + } + } + for _, f := range compress { + fn := filepath.Join(l.dir(), f.Name()) + errCompress := compressLogFile(fn, fn+compressSuffix) + if err == nil && errCompress != nil { + err = errCompress + } + } + + return err +} + +// millRun runs in a goroutine to manage post-rotation compression and removal +// of old log files. +func (l *Logger) millRun() { + for _ = range l.millCh { + // what am I going to do, log this? + _ = l.millRunOnce() + } +} + +// mill performs post-rotation compression and removal of stale log files, +// starting the mill goroutine if necessary. 
+func (l *Logger) mill() { + l.startMill.Do(func() { + l.millCh = make(chan bool, 1) + go l.millRun() + }) + select { + case l.millCh <- true: + default: + } +} + +// oldLogFiles returns the list of backup log files stored in the same +// directory as the current log file, sorted by ModTime +func (l *Logger) oldLogFiles() ([]logInfo, error) { + files, err := ioutil.ReadDir(l.dir()) + if err != nil { + return nil, fmt.Errorf("can't read log file directory: %s", err) + } + logFiles := []logInfo{} + + prefix, ext := l.prefixAndExt() + + for _, f := range files { + if f.IsDir() { + continue + } + if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil { + logFiles = append(logFiles, logInfo{t, f}) + continue + } + if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil { + logFiles = append(logFiles, logInfo{t, f}) + continue + } + // error parsing means that the suffix at the end was not generated + // by lumberjack, and therefore it's not a backup file. + } + + sort.Sort(byFormatTime(logFiles)) + + return logFiles, nil +} + +// timeFromName extracts the formatted time from the filename by stripping off +// the filename's prefix and extension. This prevents someone's filename from +// confusing time.parse. +func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) { + if !strings.HasPrefix(filename, prefix) { + return time.Time{}, errors.New("mismatched prefix") + } + if !strings.HasSuffix(filename, ext) { + return time.Time{}, errors.New("mismatched extension") + } + ts := filename[len(prefix) : len(filename)-len(ext)] + return time.Parse(backupTimeFormat, ts) +} + +// max returns the maximum size in bytes of log files before rolling. +func (l *Logger) max() int64 { + if l.MaxSize == 0 { + return int64(defaultMaxSize * megabyte) + } + return int64(l.MaxSize) * int64(megabyte) +} + +// dir returns the directory for the current filename. +func (l *Logger) dir() string { + return filepath.Dir(l.filename()) +} + +// prefixAndExt returns the filename part and extension part from the Logger's +// filename. +func (l *Logger) prefixAndExt() (prefix, ext string) { + filename := filepath.Base(l.filename()) + ext = filepath.Ext(filename) + prefix = filename[:len(filename)-len(ext)] + "-" + return prefix, ext +} + +// compressLogFile compresses the given log file, removing the +// uncompressed log file if successful. +func compressLogFile(src, dst string) (err error) { + f, err := os.Open(src) + if err != nil { + return fmt.Errorf("failed to open log file: %v", err) + } + defer f.Close() + + fi, err := os_Stat(src) + if err != nil { + return fmt.Errorf("failed to stat log file: %v", err) + } + + if err := chown(dst, fi); err != nil { + return fmt.Errorf("failed to chown compressed log file: %v", err) + } + + // If this file already exists, we presume it was created by + // a previous attempt to compress the log file. 
+ gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + return fmt.Errorf("failed to open compressed log file: %v", err) + } + defer gzf.Close() + + gz := gzip.NewWriter(gzf) + + defer func() { + if err != nil { + os.Remove(dst) + err = fmt.Errorf("failed to compress log file: %v", err) + } + }() + + if _, err := io.Copy(gz, f); err != nil { + return err + } + if err := gz.Close(); err != nil { + return err + } + if err := gzf.Close(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + if err := os.Remove(src); err != nil { + return err + } + + return nil +} + +// logInfo is a convenience struct to return the filename and its embedded +// timestamp. +type logInfo struct { + timestamp time.Time + os.FileInfo +} + +// byFormatTime sorts by newest time formatted in the name. +type byFormatTime []logInfo + +func (b byFormatTime) Less(i, j int) bool { + return b[i].timestamp.After(b[j].timestamp) +} + +func (b byFormatTime) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byFormatTime) Len() int { + return len(b) +} diff --git a/vendor/k8s.io/api/admission/v1/doc.go b/vendor/k8s.io/api/admission/v1/doc.go new file mode 100644 index 0000000000..cbc6bb59dd --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=false + +// +groupName=admission.k8s.io + +package v1 // import "k8s.io/api/admission/v1" diff --git a/vendor/k8s.io/api/admission/v1/generated.pb.go b/vendor/k8s.io/api/admission/v1/generated.pb.go new file mode 100644 index 0000000000..04eb206750 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.pb.go @@ -0,0 +1,1792 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1.AdmissionResponse.AuditAnnotationsEntry") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1.AdmissionReview") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto", fileDescriptor_4b73421fd5edef9f) +} + +var 
fileDescriptor_4b73421fd5edef9f = []byte{ + // 919 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0x58, 0xf9, 0xb0, 0x36, 0x39, 0x20, + 0x17, 0xb5, 0xbb, 0x24, 0x82, 0x2a, 0xaa, 0x38, 0x34, 0x4b, 0x2a, 0x14, 0x90, 0x9a, 0x68, 0xda, + 0x40, 0xc5, 0x01, 0x69, 0xec, 0x9d, 0xda, 0x83, 0xed, 0x99, 0x65, 0x67, 0xd6, 0xc1, 0x37, 0x4e, + 0x9c, 0xf9, 0x06, 0x1c, 0xf9, 0x0c, 0x7c, 0x83, 0x1c, 0x7b, 0xec, 0xc9, 0x22, 0xe6, 0x5b, 0xe4, + 0x84, 0x66, 0x76, 0xf6, 0x4f, 0xf3, 0x47, 0x84, 0x96, 0x93, 0xf7, 0xfd, 0xf9, 0xfd, 0xde, 0xf3, + 0xef, 0xed, 0x7b, 0x0b, 0x1e, 0x4f, 0x77, 0x85, 0x47, 0xb9, 0x3f, 0x4d, 0x86, 0x24, 0x66, 0x44, + 0x12, 0xe1, 0x2f, 0x08, 0x0b, 0x79, 0xec, 0x9b, 0x00, 0x8e, 0xa8, 0x8f, 0xc3, 0x39, 0x15, 0x82, + 0x72, 0xe6, 0x2f, 0xb6, 0xfd, 0x31, 0x61, 0x24, 0xc6, 0x92, 0x84, 0x5e, 0x14, 0x73, 0xc9, 0xe1, + 0x87, 0x69, 0xa2, 0x87, 0x23, 0xea, 0xe5, 0x89, 0xde, 0x62, 0xbb, 0xfb, 0x60, 0x4c, 0xe5, 0x24, + 0x19, 0x7a, 0x23, 0x3e, 0xf7, 0xc7, 0x7c, 0xcc, 0x7d, 0x9d, 0x3f, 0x4c, 0x5e, 0x6a, 0x4b, 0x1b, + 0xfa, 0x29, 0xe5, 0xe9, 0xde, 0x2f, 0x17, 0x4c, 0xe4, 0x84, 0x30, 0x49, 0x47, 0x58, 0x5e, 0x5d, + 0xb5, 0xfb, 0x59, 0x91, 0x3d, 0xc7, 0xa3, 0x09, 0x65, 0x24, 0x5e, 0xfa, 0xd1, 0x74, 0xac, 0x1c, + 0xc2, 0x9f, 0x13, 0x89, 0xaf, 0x42, 0xf9, 0xd7, 0xa1, 0xe2, 0x84, 0x49, 0x3a, 0x27, 0x97, 0x00, + 0x0f, 0xff, 0x0d, 0x20, 0x46, 0x13, 0x32, 0xc7, 0x17, 0x71, 0x5b, 0xbf, 0xdb, 0xa0, 0xb3, 0x97, + 0x89, 0x81, 0xc8, 0x4f, 0x09, 0x11, 0x12, 0x06, 0xa0, 0x9a, 0xd0, 0xd0, 0xb1, 0xfa, 0xd6, 0xc0, + 0x0e, 0x3e, 0x3d, 0x5d, 0xf5, 0x2a, 0xeb, 0x55, 0xaf, 0x7a, 0x7c, 0xb0, 0x7f, 0xbe, 0xea, 0x7d, + 0x74, 0x5d, 0x21, 0xb9, 0x8c, 0x88, 0xf0, 0x8e, 0x0f, 0xf6, 0x91, 0x02, 0xc3, 0x17, 0xa0, 0x36, + 0xa5, 0x2c, 0x74, 0x6e, 0xf5, 0xad, 0x41, 0x6b, 0xe7, 0xa1, 0x57, 0x88, 0x9f, 0xc3, 0xbc, 0x68, + 0x3a, 0x56, 0x0e, 0xe1, 0x29, 0x19, 0xbc, 0xc5, 0xb6, 0xf7, 0x55, 0xcc, 0x93, 0xe8, 0x5b, 0x12, + 0xab, 0x66, 0xbe, 0xa1, 0x2c, 0x0c, 0x36, 0x4d, 0xf1, 0x9a, 0xb2, 0x90, 0x66, 0x84, 0x13, 0xd0, + 0x8c, 0x89, 0xe0, 0x49, 0x3c, 0x22, 0x4e, 0x55, 0xb3, 0x3f, 0xfa, 0xef, 0xec, 0xc8, 0x30, 0x04, + 0x1d, 0x53, 0xa1, 0x99, 0x79, 0x50, 0xce, 0x0e, 0x3f, 0x07, 0x2d, 0x91, 0x0c, 0xb3, 0x80, 0x53, + 0xd3, 0x7a, 0xdc, 0x35, 0x80, 0xd6, 0xb3, 0x22, 0x84, 0xca, 0x79, 0x90, 0x82, 0x56, 0x9c, 0x2a, + 0xa9, 0xba, 0x76, 0xde, 0x7b, 0x27, 0x05, 0xda, 0xaa, 0x14, 0x2a, 0xe8, 0x50, 0x99, 0x1b, 0x2e, + 0x41, 0xdb, 0x98, 0x79, 0x97, 0xb7, 0xdf, 0x59, 0x92, 0xbb, 0xeb, 0x55, 0xaf, 0x8d, 0xde, 0xa4, + 0x45, 0x17, 0xeb, 0xc0, 0xaf, 0x01, 0x34, 0xae, 0x92, 0x10, 0x4e, 0x5b, 0x6b, 0xd4, 0x35, 0x1a, + 0x41, 0x74, 0x29, 0x03, 0x5d, 0x81, 0x82, 0x7d, 0x50, 0x63, 0x78, 0x4e, 0x9c, 0x0d, 0x8d, 0xce, + 0x87, 0xfe, 0x14, 0xcf, 0x09, 0xd2, 0x11, 0xe8, 0x03, 0x5b, 0xfd, 0x8a, 0x08, 0x8f, 0x88, 0x53, + 0xd7, 0x69, 0x77, 0x4c, 0x9a, 0xfd, 0x34, 0x0b, 0xa0, 0x22, 0x07, 0x7e, 0x01, 0x6c, 0x1e, 0xa9, + 0x57, 0x9d, 0x72, 0xe6, 0x34, 0x34, 0xc0, 0xcd, 0x00, 0x87, 0x59, 0xe0, 0xbc, 0x6c, 0xa0, 0x02, + 0x00, 0x9f, 0x83, 0x66, 0x22, 0x48, 0x7c, 0xc0, 0x5e, 0x72, 0xa7, 0xa9, 0x05, 0xfd, 0xd8, 0x2b, + 0x9f, 0x8f, 0x37, 0xd6, 0x5e, 0x09, 0x79, 0x6c, 0xb2, 0x8b, 0xf7, 0x29, 0xf3, 0xa0, 0x9c, 0x09, + 0x1e, 0x83, 0x3a, 0x1f, 0xfe, 0x48, 0x46, 0xd2, 0xb1, 0x35, 0xe7, 0x83, 0x6b, 0x87, 0x64, 0xb6, + 0xd6, 0x43, 0xf8, 0xe4, 0xc9, 0xcf, 0x92, 0x30, 0x35, 0x9f, 0xe0, 0xb6, 0xa1, 0xae, 0x1f, 0x6a, + 0x12, 0x64, 0xc8, 0xe0, 0x0f, 
0xc0, 0xe6, 0xb3, 0x30, 0x75, 0x3a, 0xe0, 0x6d, 0x98, 0x73, 0x29, + 0x0f, 0x33, 0x1e, 0x54, 0x50, 0xc2, 0x2d, 0x50, 0x0f, 0xe3, 0x25, 0x4a, 0x98, 0xd3, 0xea, 0x5b, + 0x83, 0x66, 0x00, 0x54, 0x0f, 0xfb, 0xda, 0x83, 0x4c, 0x04, 0xbe, 0x00, 0x0d, 0x1e, 0x29, 0x31, + 0x84, 0xb3, 0xf9, 0x36, 0x1d, 0xb4, 0x4d, 0x07, 0x8d, 0xc3, 0x94, 0x05, 0x65, 0x74, 0x5b, 0x7f, + 0xd4, 0xc0, 0x9d, 0xd2, 0x85, 0x12, 0x11, 0x67, 0x82, 0xfc, 0x2f, 0x27, 0xea, 0x1e, 0x68, 0xe0, + 0xd9, 0x8c, 0x9f, 0x90, 0xf4, 0x4a, 0x35, 0x8b, 0x26, 0xf6, 0x52, 0x37, 0xca, 0xe2, 0xf0, 0x08, + 0xd4, 0x85, 0xc4, 0x32, 0x11, 0xe6, 0xe2, 0xdc, 0xbf, 0xd9, 0x7a, 0x3d, 0xd3, 0x98, 0x54, 0x30, + 0x44, 0x44, 0x32, 0x93, 0xc8, 0xf0, 0xc0, 0x1e, 0xd8, 0x88, 0xb0, 0x1c, 0x4d, 0xf4, 0x55, 0xd9, + 0x0c, 0xec, 0xf5, 0xaa, 0xb7, 0x71, 0xa4, 0x1c, 0x28, 0xf5, 0xc3, 0x5d, 0x60, 0xeb, 0x87, 0xe7, + 0xcb, 0x28, 0x5b, 0x8c, 0xae, 0x1a, 0xd1, 0x51, 0xe6, 0x3c, 0x2f, 0x1b, 0xa8, 0x48, 0x86, 0xbf, + 0x5a, 0xa0, 0x83, 0x93, 0x90, 0xca, 0x3d, 0xc6, 0xb8, 0xc4, 0xe9, 0x54, 0xea, 0xfd, 0xea, 0xa0, + 0xb5, 0xf3, 0xd8, 0xbb, 0xe6, 0x23, 0xe8, 0x5d, 0x92, 0xd8, 0xdb, 0xbb, 0x40, 0xf1, 0x84, 0xc9, + 0x78, 0x19, 0x38, 0x46, 0xa3, 0xce, 0xc5, 0x30, 0xba, 0x54, 0x13, 0x0e, 0x40, 0xf3, 0x04, 0xc7, + 0x8c, 0xb2, 0xb1, 0x70, 0x1a, 0xfd, 0xaa, 0x5a, 0x6d, 0xb5, 0x19, 0xdf, 0x19, 0x1f, 0xca, 0xa3, + 0xdd, 0x2f, 0xc1, 0x07, 0x57, 0x96, 0x83, 0x1d, 0x50, 0x9d, 0x92, 0x65, 0x3a, 0x67, 0xa4, 0x1e, + 0xe1, 0xfb, 0x60, 0x63, 0x81, 0x67, 0x09, 0xd1, 0x33, 0xb3, 0x51, 0x6a, 0x3c, 0xba, 0xb5, 0x6b, + 0x6d, 0xfd, 0x69, 0x81, 0x76, 0xe9, 0x6f, 0x2c, 0x28, 0x39, 0x81, 0x47, 0xa0, 0x61, 0xee, 0x8d, + 0xe6, 0x68, 0xed, 0xdc, 0xbb, 0x89, 0x02, 0x1a, 0x10, 0xb4, 0xd4, 0xab, 0x90, 0xdd, 0xc1, 0x8c, + 0x46, 0x9d, 0x86, 0xd8, 0x48, 0x64, 0x3e, 0x6e, 0x9f, 0xdc, 0x5c, 0xd4, 0x54, 0x80, 0xcc, 0x42, + 0x39, 0x53, 0x30, 0x38, 0x3d, 0x73, 0x2b, 0xaf, 0xce, 0xdc, 0xca, 0xeb, 0x33, 0xb7, 0xf2, 0xcb, + 0xda, 0xb5, 0x4e, 0xd7, 0xae, 0xf5, 0x6a, 0xed, 0x5a, 0xaf, 0xd7, 0xae, 0xf5, 0xd7, 0xda, 0xb5, + 0x7e, 0xfb, 0xdb, 0xad, 0x7c, 0x7f, 0x6b, 0xb1, 0xfd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x62, + 0xcb, 0x64, 0xf1, 0x09, 0x09, 0x00, 0x00, +} + +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + if m.DryRun != nil { + i-- + if *m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 
+ } + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + 
dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kind.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubResource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OldObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DryRun != nil { + n += 2 + } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestKind != nil { + l = m.RequestKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestResource != nil { + l = m.RequestResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.RequestSubResource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Patch != nil { + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PatchType != nil { + l = len(*m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdmissionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, + `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionResponse) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&AdmissionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, + `Patch:` + valueToStringGenerated(this.Patch) + `,`, + `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionReview{`, + `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + 
`Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = Operation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestKind == nil { + m.RequestKind = &v1.GroupVersionKind{} + } + if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestResource == nil { + m.RequestResource = &v1.GroupVersionResource{} + } + if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestSubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &v1.Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Patch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) + if m.Patch == nil { + m.Patch = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PatchType(dAtA[iNdEx:postIndex]) + m.PatchType = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := 
iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &AdmissionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &AdmissionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto new file mode 100644 index 0000000000..5fc0e342e8 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.proto @@ -0,0 +1,167 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package k8s.io.api.admission.v1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// AdmissionRequest describes the admission.Attributes for the admission request. +message AdmissionRequest { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + + // Resource is the fully-qualified resource being requested (for example, v1.pods) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + optional string subResource = 4; + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional string requestSubResource = 15; + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + optional string name = 5; + + // Namespace is the namespace associated with the request (if any). + // +optional + optional string namespace = 6; + + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + optional string operation = 7; + + // UserInfo is information about the requesting user + optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + + // Object is the object from the incoming request. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; +} + +// AdmissionResponse describes an admission response. +message AdmissionResponse { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. + optional string uid = 1; + + // Allowed indicates whether or not the admission request was permitted. + optional bool allowed = 2; + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + optional bytes patch = 4; + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + optional string patchType = 5; + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. 
+ // +optional + map auditAnnotations = 6; + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + repeated string warnings = 7; +} + +// AdmissionReview describes an admission review request/response. +message AdmissionReview { + // Request describes the attributes for the admission request. + // +optional + optional AdmissionRequest request = 1; + + // Response describes the attributes for the admission response. + // +optional + optional AdmissionResponse response = 2; +} + diff --git a/vendor/k8s.io/api/admission/v1/register.go b/vendor/k8s.io/api/admission/v1/register.go new file mode 100644 index 0000000000..b548509ab3 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admission/v1/types.go b/vendor/k8s.io/api/admission/v1/types.go new file mode 100644 index 0000000000..556fd1ad54 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types.go @@ -0,0 +1,169 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionReview describes an admission review request/response. +type AdmissionReview struct { + metav1.TypeMeta `json:",inline"` + // Request describes the attributes for the admission request. + // +optional + Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // Response describes the attributes for the admission response. + // +optional + Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Resource is the fully-qualified resource being requested (for example, v1.pods) + Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. 
+ // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + // Namespace is the namespace associated with the request (if any). + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` + // UserInfo is information about the requesting user + UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` + // Object is the object from the incoming request. + // +optional + Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` +} + +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. 
+ UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + + // Allowed indicates whether or not the admission request was permitted. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` +} + +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. +const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..f81594c912 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -0,0 +1,78 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AdmissionRequest = map[string]string{ + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", + "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", + "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", + "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", + "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", + "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. 
If that is the case, this field will contain an empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", + "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", +} + +func (AdmissionRequest) SwaggerDoc() map[string]string { + return map_AdmissionRequest +} + +var map_AdmissionResponse = map[string]string{ + "": "AdmissionResponse describes an admission response.", + "uid": "UID is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.", + "allowed": "Allowed indicates whether or not the admission request was permitted.", + "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", + "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", + "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", + "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", + "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", +} + +func (AdmissionResponse) SwaggerDoc() map[string]string { + return map_AdmissionResponse +} + +var map_AdmissionReview = map[string]string{ + "": "AdmissionReview describes an admission review request/response.", + "request": "Request describes the attributes for the admission request.", + "response": "Response describes the attributes for the admission response.", +} + +func (AdmissionReview) SwaggerDoc() map[string]string { + return map_AdmissionReview +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f7369471a8 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go @@ -0,0 +1,141 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(metav1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(metav1.GroupVersionResource) + **out = **in + } + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } + in.Options.DeepCopyInto(&out.Options) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + *out = new(PatchType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Warnings != nil { + in, out := &in.Warnings, &out.Warnings + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. 
+func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go new file mode 100644 index 0000000000..a5669022a0 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=false +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=admission.k8s.io + +package v1beta1 // import "k8s.io/api/admission/v1beta1" diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go new file mode 100644 index 0000000000..ae82ff5996 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -0,0 +1,1792 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_b87c2352de86eab9) +} + +var fileDescriptor_b87c2352de86eab9 = []byte{ + // 925 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcb, 0x6e, 0x23, 0x45, + 0x17, 0x76, 0x8f, 0x1d, 0xdb, 0x5d, 0xce, 0x3f, 0xf6, 0xd4, 0xfc, 0x48, 0x2d, 0x0b, 0xb5, 0x4d, + 0x16, 
0xc8, 0x48, 0x93, 0x6a, 0x12, 0xc1, 0x28, 0x1a, 0xb1, 0x49, 0x93, 0x08, 0x05, 0xa4, 0x49, + 0x54, 0x33, 0x86, 0x81, 0x05, 0x52, 0xd9, 0xae, 0xb1, 0x1b, 0xdb, 0x55, 0x4d, 0x57, 0xb5, 0x83, + 0x77, 0xec, 0xd9, 0xf0, 0x06, 0xbc, 0x00, 0x6f, 0xc1, 0x26, 0xcb, 0x59, 0xce, 0xca, 0x22, 0xe6, + 0x2d, 0xb2, 0x42, 0x55, 0x5d, 0x7d, 0x19, 0x27, 0x81, 0xb9, 0xb0, 0x72, 0x9f, 0xcb, 0xf7, 0x9d, + 0xe3, 0xef, 0xf4, 0x39, 0x0d, 0x8e, 0xa7, 0x07, 0x02, 0x05, 0xdc, 0x9b, 0xc6, 0x03, 0x1a, 0x31, + 0x2a, 0xa9, 0xf0, 0x16, 0x94, 0x8d, 0x78, 0xe4, 0x99, 0x00, 0x09, 0x03, 0x8f, 0x8c, 0xe6, 0x81, + 0x10, 0x01, 0x67, 0xde, 0x62, 0x6f, 0x40, 0x25, 0xd9, 0xf3, 0xc6, 0x94, 0xd1, 0x88, 0x48, 0x3a, + 0x42, 0x61, 0xc4, 0x25, 0x87, 0xef, 0x27, 0xd9, 0x88, 0x84, 0x01, 0xca, 0xb2, 0x91, 0xc9, 0x6e, + 0xef, 0x8e, 0x03, 0x39, 0x89, 0x07, 0x68, 0xc8, 0xe7, 0xde, 0x98, 0x8f, 0xb9, 0xa7, 0x41, 0x83, + 0xf8, 0xb9, 0xb6, 0xb4, 0xa1, 0x9f, 0x12, 0xb2, 0xf6, 0x83, 0x62, 0xe9, 0x58, 0x4e, 0x28, 0x93, + 0xc1, 0x90, 0xc8, 0xa4, 0xfe, 0x66, 0xe9, 0xf6, 0x27, 0x79, 0xf6, 0x9c, 0x0c, 0x27, 0x01, 0xa3, + 0xd1, 0xd2, 0x0b, 0xa7, 0x63, 0xe5, 0x10, 0xde, 0x9c, 0x4a, 0x72, 0x13, 0xca, 0xbb, 0x0d, 0x15, + 0xc5, 0x4c, 0x06, 0x73, 0x7a, 0x0d, 0xf0, 0xf0, 0xdf, 0x00, 0x62, 0x38, 0xa1, 0x73, 0xb2, 0x89, + 0xdb, 0xf9, 0xcd, 0x06, 0xad, 0xc3, 0x54, 0x11, 0x4c, 0x7f, 0x8c, 0xa9, 0x90, 0xd0, 0x07, 0xe5, + 0x38, 0x18, 0x39, 0x56, 0xd7, 0xea, 0xd9, 0xfe, 0xc7, 0x17, 0xab, 0x4e, 0x69, 0xbd, 0xea, 0x94, + 0xfb, 0x27, 0x47, 0x57, 0xab, 0xce, 0x07, 0xb7, 0x15, 0x92, 0xcb, 0x90, 0x0a, 0xd4, 0x3f, 0x39, + 0xc2, 0x0a, 0x0c, 0x9f, 0x81, 0xca, 0x34, 0x60, 0x23, 0xe7, 0x4e, 0xd7, 0xea, 0x35, 0xf6, 0x1f, + 0xa2, 0x7c, 0x02, 0x19, 0x0c, 0x85, 0xd3, 0xb1, 0x72, 0x08, 0xa4, 0x64, 0x40, 0x8b, 0x3d, 0xf4, + 0x45, 0xc4, 0xe3, 0xf0, 0x6b, 0x1a, 0xa9, 0x66, 0xbe, 0x0a, 0xd8, 0xc8, 0xdf, 0x36, 0xc5, 0x2b, + 0xca, 0xc2, 0x9a, 0x11, 0x4e, 0x40, 0x3d, 0xa2, 0x82, 0xc7, 0xd1, 0x90, 0x3a, 0x65, 0xcd, 0xfe, + 0xe8, 0xcd, 0xd9, 0xb1, 0x61, 0xf0, 0x5b, 0xa6, 0x42, 0x3d, 0xf5, 0xe0, 0x8c, 0x1d, 0x7e, 0x0a, + 0x1a, 0x22, 0x1e, 0xa4, 0x01, 0xa7, 0xa2, 0xf5, 0xb8, 0x6f, 0x00, 0x8d, 0x27, 0x79, 0x08, 0x17, + 0xf3, 0x60, 0x00, 0x1a, 0x51, 0xa2, 0xa4, 0xea, 0xda, 0xf9, 0xdf, 0x3b, 0x29, 0xd0, 0x54, 0xa5, + 0x70, 0x4e, 0x87, 0x8b, 0xdc, 0x70, 0x09, 0x9a, 0xc6, 0xcc, 0xba, 0xbc, 0xfb, 0xce, 0x92, 0xdc, + 0x5f, 0xaf, 0x3a, 0x4d, 0xfc, 0x2a, 0x2d, 0xde, 0xac, 0x03, 0xbf, 0x04, 0xd0, 0xb8, 0x0a, 0x42, + 0x38, 0x4d, 0xad, 0x51, 0xdb, 0x68, 0x04, 0xf1, 0xb5, 0x0c, 0x7c, 0x03, 0x0a, 0x76, 0x41, 0x85, + 0x91, 0x39, 0x75, 0xb6, 0x34, 0x3a, 0x1b, 0xfa, 0x63, 0x32, 0xa7, 0x58, 0x47, 0xa0, 0x07, 0x6c, + 0xf5, 0x2b, 0x42, 0x32, 0xa4, 0x4e, 0x55, 0xa7, 0xdd, 0x33, 0x69, 0xf6, 0xe3, 0x34, 0x80, 0xf3, + 0x1c, 0xf8, 0x19, 0xb0, 0x79, 0xa8, 0x5e, 0xf5, 0x80, 0x33, 0xa7, 0xa6, 0x01, 0x6e, 0x0a, 0x38, + 0x4d, 0x03, 0x57, 0x45, 0x03, 0xe7, 0x00, 0xf8, 0x14, 0xd4, 0x63, 0x41, 0xa3, 0x13, 0xf6, 0x9c, + 0x3b, 0x75, 0x2d, 0xe8, 0x87, 0xa8, 0x78, 0x43, 0x5e, 0x59, 0x7b, 0x25, 0x64, 0xdf, 0x64, 0xe7, + 0xef, 0x53, 0xea, 0xc1, 0x19, 0x13, 0xec, 0x83, 0x2a, 0x1f, 0xfc, 0x40, 0x87, 0xd2, 0xb1, 0x35, + 0xe7, 0xee, 0xad, 0x43, 0x32, 0x5b, 0x8b, 0x30, 0x39, 0x3f, 0xfe, 0x49, 0x52, 0xa6, 0xe6, 0xe3, + 0xdf, 0x35, 0xd4, 0xd5, 0x53, 0x4d, 0x82, 0x0d, 0x19, 0xfc, 0x1e, 0xd8, 0x7c, 0x36, 0x4a, 0x9c, + 0x0e, 0x78, 0x1b, 0xe6, 0x4c, 0xca, 0xd3, 0x94, 0x07, 0xe7, 0x94, 0x70, 0x07, 0x54, 0x47, 0xd1, + 0x12, 0xc7, 0xcc, 0x69, 0x74, 0xad, 0x5e, 0xdd, 0x07, 0xaa, 0x87, 0x23, 0xed, 0xc1, 0x26, 0x02, + 0x9f, 0x81, 0x1a, 0x0f, 0x95, 
0x18, 0xc2, 0xd9, 0x7e, 0x9b, 0x0e, 0x9a, 0xa6, 0x83, 0xda, 0x69, + 0xc2, 0x82, 0x53, 0xba, 0x9d, 0xdf, 0x2b, 0xe0, 0x5e, 0xe1, 0x42, 0x89, 0x90, 0x33, 0x41, 0xff, + 0x93, 0x13, 0xf5, 0x11, 0xa8, 0x91, 0xd9, 0x8c, 0x9f, 0xd3, 0xe4, 0x4a, 0xd5, 0xf3, 0x26, 0x0e, + 0x13, 0x37, 0x4e, 0xe3, 0xf0, 0x0c, 0x54, 0x85, 0x24, 0x32, 0x16, 0xe6, 0xe2, 0x3c, 0x78, 0xbd, + 0xf5, 0x7a, 0xa2, 0x31, 0x89, 0x60, 0x98, 0x8a, 0x78, 0x26, 0xb1, 0xe1, 0x81, 0x1d, 0xb0, 0x15, + 0x12, 0x39, 0x9c, 0xe8, 0xab, 0xb2, 0xed, 0xdb, 0xeb, 0x55, 0x67, 0xeb, 0x4c, 0x39, 0x70, 0xe2, + 0x87, 0x07, 0xc0, 0xd6, 0x0f, 0x4f, 0x97, 0x61, 0xba, 0x18, 0x6d, 0x35, 0xa2, 0xb3, 0xd4, 0x79, + 0x55, 0x34, 0x70, 0x9e, 0x0c, 0x7f, 0xb1, 0x40, 0x8b, 0xc4, 0xa3, 0x40, 0x1e, 0x32, 0xc6, 0x25, + 0x49, 0xa6, 0x52, 0xed, 0x96, 0x7b, 0x8d, 0xfd, 0x63, 0xf4, 0x4f, 0x5f, 0x42, 0x74, 0x4d, 0x67, + 0x74, 0xb8, 0xc1, 0x73, 0xcc, 0x64, 0xb4, 0xf4, 0x1d, 0x23, 0x54, 0x6b, 0x33, 0x8c, 0xaf, 0x15, + 0x86, 0x3d, 0x50, 0x3f, 0x27, 0x11, 0x0b, 0xd8, 0x58, 0x38, 0xb5, 0x6e, 0x59, 0xed, 0xb7, 0x5a, + 0x8f, 0x6f, 0x8c, 0x0f, 0x67, 0xd1, 0xf6, 0xe7, 0xe0, 0xbd, 0x1b, 0xcb, 0xc1, 0x16, 0x28, 0x4f, + 0xe9, 0x32, 0x19, 0x36, 0x56, 0x8f, 0xf0, 0xff, 0x60, 0x6b, 0x41, 0x66, 0x31, 0xd5, 0x83, 0xb3, + 0x71, 0x62, 0x3c, 0xba, 0x73, 0x60, 0xed, 0xfc, 0x61, 0x81, 0x66, 0xe1, 0x6f, 0x2c, 0x02, 0x7a, + 0x0e, 0xfb, 0xa0, 0x66, 0x8e, 0x8e, 0xe6, 0x68, 0xec, 0xa3, 0xd7, 0x96, 0x41, 0xa3, 0xfc, 0x86, + 0x7a, 0x29, 0xd2, 0x8b, 0x98, 0x72, 0xc1, 0x6f, 0xf5, 0x87, 0x48, 0xeb, 0x64, 0x3e, 0x73, 0xde, + 0x1b, 0xca, 0x9b, 0x48, 0x91, 0x5a, 0x38, 0xa3, 0xf3, 0x77, 0x2f, 0x2e, 0xdd, 0xd2, 0x8b, 0x4b, + 0xb7, 0xf4, 0xf2, 0xd2, 0x2d, 0xfd, 0xbc, 0x76, 0xad, 0x8b, 0xb5, 0x6b, 0xbd, 0x58, 0xbb, 0xd6, + 0xcb, 0xb5, 0x6b, 0xfd, 0xb9, 0x76, 0xad, 0x5f, 0xff, 0x72, 0x4b, 0xdf, 0xd5, 0x0c, 0xf1, 0xdf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x23, 0xa1, 0xd9, 0x27, 0x09, 0x00, 0x00, +} + +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + if m.DryRun != nil { + i-- + if *m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := 
m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + 
if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kind.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubResource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OldObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DryRun != nil { + n += 2 + } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestKind != nil { + l = m.RequestKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestResource != nil { + l = m.RequestResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.RequestSubResource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Patch != nil { + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PatchType != nil { + l = len(*m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { 
+ l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdmissionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, + `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionResponse) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&AdmissionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, + `Patch:` + valueToStringGenerated(this.Patch) + `,`, + `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionReview{`, + `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv 
:= reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = Operation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestKind == nil { + m.RequestKind = &v1.GroupVersionKind{} + } + if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestResource == nil { + m.RequestResource = &v1.GroupVersionResource{} + } + if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestSubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &v1.Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) + if m.Patch == nil { + m.Patch = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PatchType(dAtA[iNdEx:postIndex]) + m.PatchType = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = 
entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &AdmissionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &AdmissionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto new file mode 100644 index 0000000000..41a2643120 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto @@ -0,0 +1,167 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.admission.v1beta1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// AdmissionRequest describes the admission.Attributes for the admission request. +message AdmissionRequest { + // UID is an identifier for the individual request/response. 
It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + + // Resource is the fully-qualified resource being requested (for example, v1.pods) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + optional string subResource = 4; + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional string requestSubResource = 15; + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. 
+ // +optional + optional string name = 5; + + // Namespace is the namespace associated with the request (if any). + // +optional + optional string namespace = 6; + + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + optional string operation = 7; + + // UserInfo is information about the requesting user + optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + + // Object is the object from the incoming request. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; +} + +// AdmissionResponse describes an admission response. +message AdmissionResponse { + // UID is an identifier for the individual request/response. + // This should be copied over from the corresponding AdmissionRequest. + optional string uid = 1; + + // Allowed indicates whether or not the admission request was permitted. + optional bool allowed = 2; + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + optional bytes patch = 4; + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + optional string patchType = 5; + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + map auditAnnotations = 6; + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + repeated string warnings = 7; +} + +// AdmissionReview describes an admission review request/response. +message AdmissionReview { + // Request describes the attributes for the admission request. + // +optional + optional AdmissionRequest request = 1; + + // Response describes the attributes for the admission response. 
+ // +optional + optional AdmissionResponse response = 2; +} + diff --git a/vendor/k8s.io/api/admission/v1beta1/register.go b/vendor/k8s.io/api/admission/v1beta1/register.go new file mode 100644 index 0000000000..78d21a0c8a --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go new file mode 100644 index 0000000000..00c619d998 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/types.go @@ -0,0 +1,174 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 +// +k8s:prerelease-lifecycle-gen:deprecated=1.19 +// This API is never server served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally +// and having the generator against this group will protect future APIs which may be served. 
+// +k8s:prerelease-lifecycle-gen:replacement=admission.k8s.io,v1,AdmissionReview + +// AdmissionReview describes an admission review request/response. +type AdmissionReview struct { + metav1.TypeMeta `json:",inline"` + // Request describes the attributes for the admission request. + // +optional + Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // Response describes the attributes for the admission response. + // +optional + Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Resource is the fully-qualified resource being requested (for example, v1.pods) + Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. 
+ // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + // Namespace is the namespace associated with the request (if any). + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` + // UserInfo is information about the requesting user + UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` + // Object is the object from the incoming request. + // +optional + Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` +} + +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This should be copied over from the corresponding AdmissionRequest. 
+ UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + + // Allowed indicates whether or not the admission request was permitted. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` +} + +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. +const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..13067ad80d --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,78 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AdmissionRequest = map[string]string{ + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", + "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", + "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", + "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", + "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", + "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. 
If that is the case, this field will contain an empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", + "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", +} + +func (AdmissionRequest) SwaggerDoc() map[string]string { + return map_AdmissionRequest +} + +var map_AdmissionResponse = map[string]string{ + "": "AdmissionResponse describes an admission response.", + "uid": "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.", + "allowed": "Allowed indicates whether or not the admission request was permitted.", + "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", + "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", + "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", + "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", + "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", +} + +func (AdmissionResponse) SwaggerDoc() map[string]string { + return map_AdmissionResponse +} + +var map_AdmissionReview = map[string]string{ + "": "AdmissionReview describes an admission review request/response.", + "request": "Request describes the attributes for the admission request.", + "response": "Response describes the attributes for the admission response.", +} + +func (AdmissionReview) SwaggerDoc() map[string]string { + return map_AdmissionReview +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4f3dd45bea --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,141 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(v1.GroupVersionResource) + **out = **in + } + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } + in.Options.DeepCopyInto(&out.Options) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(v1.Status) + (*in).DeepCopyInto(*out) + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + *out = new(PatchType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Warnings != nil { + in, out := &in.Warnings, &out.Warnings + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. 
+func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..8fc1cde0a6 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,49 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *AdmissionReview) APILifecycleDeprecated() (major, minor int) { + return 1, 19 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *AdmissionReview) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "admission.k8s.io", Version: "v1", Kind: "AdmissionReview"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *AdmissionReview) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 0000000000..f02fa8e434 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,49 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go new file mode 100644 index 0000000000..9f20152e45 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation contains generic api type validation functions. +package validation // import "k8s.io/apimachinery/pkg/api/validation" diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go new file mode 100644 index 0000000000..947c96f434 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go @@ -0,0 +1,86 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// IsNegativeErrorMsg is a error message for value must be greater than or equal to 0. 
+const IsNegativeErrorMsg string = `must be greater than or equal to 0` + +// ValidateNameFunc validates that the provided name is valid for a given resource type. +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc func(name string, prefix bool) []string + +// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. +func NameIsDNSSubdomain(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Subdomain(name) +} + +// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. +func NameIsDNSLabel(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Label(name) +} + +// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label. +func NameIsDNS1035Label(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1035Label(name) +} + +// ValidateNamespaceName can be used to check whether the given namespace name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNamespaceName = NameIsDNSLabel + +// ValidateServiceAccountName can be used to check whether the given service account name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceAccountName = NameIsDNSSubdomain + +// maskTrailingDash replaces the final character of a string with a subdomain safe +// value if is a dash. +func maskTrailingDash(name string) string { + if strings.HasSuffix(name, "-") { + return name[:len(name)-2] + "a" + } + return name +} + +// ValidateNonnegativeField validates that given value is not negative. +func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg)) + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go new file mode 100644 index 0000000000..889ec69aab --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -0,0 +1,268 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "fmt" + "strings" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// FieldImmutableErrorMsg is a error message for field is immutable. +const FieldImmutableErrorMsg string = `field is immutable` + +const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB + +// BannedOwners is a black list of object that are not allowed to be owners. +var BannedOwners = map[schema.GroupVersionKind]struct{}{ + {Group: "", Version: "v1", Kind: "Event"}: {}, +} + +// ValidateClusterName can be used to check whether the given cluster name is valid. +var ValidateClusterName = NameIsDNS1035Label + +// ValidateAnnotations validates that a set of annotations are correctly defined. +func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + var totalSize int64 + for k, v := range annotations { + for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { + allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) + } + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(totalAnnotationSizeLimitB) { + allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB)) + } + return allErrs +} + +func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) + // gvk.Group is empty for the legacy group. + if len(gvk.Version) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) + } + if len(gvk.Kind) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) + } + if len(ownerReference.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) + } + if len(ownerReference.UID) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) + } + if _, ok := BannedOwners[gvk]; ok { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) + } + return allErrs +} + +// ValidateOwnerReferences validates that a set of owner references are correctly defined. +func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + controllerName := "" + for _, ref := range ownerReferences { + allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) + if ref.Controller != nil && *ref.Controller { + if controllerName != "" { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences, + fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name))) + } else { + controllerName = ref.Name + } + } + } + return allErrs +} + +// ValidateFinalizerName validates finalizer names. 
+func ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(stringValue) { + allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) + } + + return allErrs +} + +// ValidateNoNewFinalizers validates the new finalizers has no new finalizers compare to old finalizers. +func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...)) + if len(extra) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List()))) + } + return allErrs +} + +// ValidateImmutableField validates the new value and the old value are deeply equal. +func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !apiequality.Semantic.DeepEqual(oldVal, newVal) { + allErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg)) + } + return allErrs +} + +// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + metadata, err := meta.Accessor(objMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath) +} + +// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(meta.GetGenerateName()) != 0 { + for _, msg := range nameFn(meta.GetGenerateName(), true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GetGenerateName(), msg)) + } + } + // If the generated name validates, but the calculated value does not, it's a problem with generation, and we + // report it here. This may confuse users, but indicates a programming bug and still must be validated. 
+ // If there are multiple fields out of which one is required then add an or as a separator + if len(meta.GetName()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) + } else { + for _, msg := range nameFn(meta.GetName(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg)) + } + } + if requiresNamespace { + if len(meta.GetNamespace()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) + } else { + for _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.GetNamespace(), msg)) + } + } + } else { + if len(meta.GetNamespace()) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type")) + } + } + if len(meta.GetClusterName()) != 0 { + for _, msg := range ValidateClusterName(meta.GetClusterName(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.GetClusterName(), msg)) + } + } + for _, entry := range meta.GetManagedFields() { + allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...) + } + allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...) + allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(meta.GetManagedFields(), fldPath.Child("managedFields"))...) + return allErrs +} + +// ValidateFinalizers tests if the finalizers name are valid, and if there are conflicting finalizers. +func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + hasFinalizerOrphanDependents := false + hasFinalizerDeleteDependents := false + for _, finalizer := range finalizers { + allErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...) + if finalizer == metav1.FinalizerOrphanDependents { + hasFinalizerOrphanDependents = true + } + if finalizer == metav1.FinalizerDeleteDependents { + hasFinalizerDeleteDependents = true + } + } + if hasFinalizerDeleteDependents && hasFinalizerOrphanDependents { + allErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf("finalizer %s and %s cannot be both set", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents))) + } + return allErrs +} + +// ValidateObjectMetaUpdate validates an object's metadata when updated. +func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { + newMetadata, err := meta.Accessor(newMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error())) + return allErrs + } + oldMetadata, err := meta.Accessor(oldMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath) +} + +// ValidateObjectMetaAccessorUpdate validates an object's metadata when updated. 
+func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // Finalizers cannot be added if the object is already being deleted. + if oldMeta.GetDeletionTimestamp() != nil { + allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child("finalizers"))...) + } + + // Reject updates that don't specify a resource version + if len(newMeta.GetResourceVersion()) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.GetResourceVersion(), "must be specified for an update")) + } + + // Generation shouldn't be decremented + if newMeta.GetGeneration() < oldMeta.GetGeneration() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented")) + } + + for _, entry := range newMeta.GetManagedFields() { + allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...) + } + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child("creationTimestamp"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionTimestamp(), oldMeta.GetDeletionTimestamp(), fldPath.Child("deletionTimestamp"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionGracePeriodSeconds(), oldMeta.GetDeletionGracePeriodSeconds(), fldPath.Child("deletionGracePeriodSeconds"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetClusterName(), oldMeta.GetClusterName(), fldPath.Child("clusterName"))...) + + allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(newMeta.GetManagedFields(), fldPath.Child("managedFields"))...) + + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go b/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go new file mode 100644 index 0000000000..ffb9f56d80 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go @@ -0,0 +1,68 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package path + +import ( + "fmt" + "strings" +) + +// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store) +var NameMayNotBe = []string{".", ".."} + +// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store) +var NameMayNotContain = []string{"/", "%"} + +// IsValidPathSegmentName validates the name can be safely encoded as a path segment +func IsValidPathSegmentName(name string) []string { + for _, illegalName := range NameMayNotBe { + if name == illegalName { + return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} + } + } + + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment +// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid +func IsValidPathSegmentPrefix(name string) []string { + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// ValidatePathSegmentName validates the name can be safely encoded as a path segment +func ValidatePathSegmentName(name string, prefix bool) []string { + if prefix { + return IsValidPathSegmentPrefix(name) + } + + return IsValidPathSegmentName(name) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go new file mode 100644 index 0000000000..a45fa2a8a5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go new file mode 100644 index 0000000000..472a9aeb23 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Scheme is the registry for any type that adheres to the meta API spec. +var scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme. +var Codecs = serializer.NewCodecFactory(scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +// Unlike other API groups, meta internal knows about all meta external versions, but keeps +// the logic for conversion private. +func init() { + utilruntime.Must(internalversion.AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go new file mode 100644 index 0000000000..8403d1a861 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateListOptions returns all validation errors found while validating the ListOptions. 
+func ValidateListOptions(options *internalversion.ListOptions) field.ErrorList { + allErrs := field.ErrorList{} + if match := options.ResourceVersionMatch; len(match) > 0 { + if options.Watch { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch")) + } + if len(options.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided")) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + if match != metav1.ResourceVersionMatchExact && match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchExact), string(metav1.ResourceVersionMatchNotOlderThan), ""})) + } + if match == metav1.ResourceVersionMatchExact && options.ResourceVersion == "0" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\"")) + } + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go new file mode 100644 index 0000000000..715adf2f97 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -0,0 +1,262 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "regexp" + "unicode" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func ValidateLabelSelector(ps *metav1.LabelSelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ps == nil { + return allErrs + } + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + for i, expr := range ps.MatchExpressions { + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) 
+ } + return allErrs +} + +func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch sr.Operator { + case metav1.LabelSelectorOpIn, metav1.LabelSelectorOpNotIn: + if len(sr.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.LabelSelectorOpExists, metav1.LabelSelectorOpDoesNotExist: + if len(sr.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } + allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + return allErrs +} + +// ValidateLabelName validates that the label name is correctly defined. +func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(labelName) { + allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) + } + return allErrs +} + +// ValidateLabels validates that a set of labels are correctly defined. +func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, v := range labels { + allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) + for _, msg := range validation.IsValidLabelValue(v) { + allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) + } + } + return allErrs +} + +func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { + allErrs := field.ErrorList{} + if options.OrphanDependents != nil && options.PropagationPolicy != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot be both set")) + } + if options.PropagationPolicy != nil && + *options.PropagationPolicy != metav1.DeletePropagationForeground && + *options.PropagationPolicy != metav1.DeletePropagationBackground && + *options.PropagationPolicy != metav1.DeletePropagationOrphan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) + } + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) 
+ return allErrs +} + +func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList { + return append( + ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), + ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., + ) +} + +func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { + return append( + ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), + ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., + ) +} + +func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { + allErrs := field.ErrorList{} + if patchType != types.ApplyPatchType { + if options.Force != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) + } + } else { + if options.FieldManager == "" { + // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. + allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) + } + } + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + return allErrs +} + +var FieldManagerMaxLength = 128 + +// ValidateFieldManager valides that the fieldManager is the proper length and +// only has printable characters. +func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // the field can not be set as a `*string`, so a empty string ("") is + // considered as not set and is defaulted by the rest of the process + // (unless apply is used, in which case it is required). + if len(fieldManager) > FieldManagerMaxLength { + allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) + } + // Verify that all characters are printable. + for i, r := range fieldManager { + if !unicode.IsPrint(r) { + allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) + } + } + + return allErrs +} + +var allowedDryRunValues = sets.NewString(metav1.DryRunAll) + +// ValidateDryRun validates that a dryRun query param only contains allowed values. +func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedDryRunValues.HasAll(dryRun...) { + allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) + } + return allErrs +} + +const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` + +// ValidateTableOptions returns any invalid flags on TableOptions. 
+func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { + var allErrs field.ErrorList + switch opts.IncludeObject { + case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) + } + return allErrs +} + +func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for _, fields := range fieldsList { + switch fields.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`")) + } + if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`")) + } + } + return allErrs +} + +func ValidateConditions(conditions []metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + conditionTypeToFirstIndex := map[string]int{} + for i, condition := range conditions { + if _, ok := conditionTypeToFirstIndex[condition.Type]; ok { + allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("type"), condition.Type)) + } else { + conditionTypeToFirstIndex[condition.Type] = i + } + + allErrs = append(allErrs, ValidateCondition(condition, fldPath.Index(i))...) + } + + return allErrs +} + +// validConditionStatuses is used internally to check validity and provide a good message +var validConditionStatuses = sets.NewString(string(metav1.ConditionTrue), string(metav1.ConditionFalse), string(metav1.ConditionUnknown)) + +const ( + maxReasonLen = 1 * 1024 + maxMessageLen = 32 * 1024 +) + +func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // type is set and is a valid format + allErrs = append(allErrs, ValidateLabelName(condition.Type, fldPath.Child("type"))...) + + // status is set and is an accepted value + if !validConditionStatuses.Has(string(condition.Status)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("status"), condition.Status, validConditionStatuses.List())) + } + + if condition.ObservedGeneration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("observedGeneration"), condition.ObservedGeneration, "must be greater than or equal to zero")) + } + + if condition.LastTransitionTime.IsZero() { + allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set")) + } + + if len(condition.Reason) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set")) + } else { + for _, currErr := range isValidConditionReason(condition.Reason) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) + } + if len(condition.Reason) > maxReasonLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) + } + } + + if len(condition.Message) > maxMessageLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) + } + + return allErrs +} + +const conditionReasonFmt string = "[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?" 
+const conditionReasonErrMsg string = "a condition reason must start with alphabetic character, optionally followed by a string of alphanumeric characters or '_,:', and must end with an alphanumeric character or '_'" + +var conditionReasonRegexp = regexp.MustCompile("^" + conditionReasonFmt + "$") + +// isValidConditionReason tests for a string that conforms to rules for condition reasons. This checks the format, but not the length. +func isValidConditionReason(value string) []string { + if !conditionReasonRegexp.MatchString(value) { + return []string{validation.RegexError(conditionReasonErrMsg, conditionReasonFmt, "my_name", "MY_NAME", "MyName", "ReasonA,ReasonB", "ReasonA:ReasonB")} + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go new file mode 100644 index 0000000000..563b62efa5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateTableOptions returns any invalid flags on TableOptions. +func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { + var allErrs field.ErrorList + switch opts.IncludeObject { + case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go new file mode 100644 index 0000000000..82a473bb14 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -0,0 +1,127 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rand provides utilities related to randomization. +package rand + +import ( + "math/rand" + "sync" + "time" +) + +var rng = struct { + sync.Mutex + rand *rand.Rand +}{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// Int returns a non-negative pseudo-random int. +func Int() int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int() +} + +// Intn generates an integer in range [0,max). +// By design this should panic if input is invalid, <= 0. 
+func Intn(max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max) +} + +// IntnRange generates an integer in range [min,max). +// By design this should panic if input is invalid, <= 0. +func IntnRange(min, max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max-min) + min +} + +// IntnRange generates an int64 integer in range [min,max). +// By design this should panic if input is invalid, <= 0. +func Int63nRange(min, max int64) int64 { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int63n(max-min) + min +} + +// Seed seeds the rng with the provided seed. +func Seed(seed int64) { + rng.Lock() + defer rng.Unlock() + + rng.rand = rand.New(rand.NewSource(seed)) +} + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Perm(n) +} + +const ( + // We omit vowels from the set of available characters to reduce the chances + // of "bad words" being formed. + alphanums = "bcdfghjklmnpqrstvwxz2456789" + // No. of bits required to index into alphanums string. + alphanumsIdxBits = 5 + // Mask used to extract last alphanumsIdxBits of an int. + alphanumsIdxMask = 1<<alphanumsIdxBits - 1 + // No. of random letters we can extract for each int63. + maxAlphanumsPerInt = 63 / alphanumsIdxBits +) + +// String generates a random alphanumeric string, without vowels, which is n +// characters long. This will panic if n is zero. +func String(n int) string { + b := make([]byte, n) + rng.Lock() + defer rng.Unlock() + + randomInt63 := rng.rand.Int63() + remaining := maxAlphanumsPerInt + for i := 0; i < n; { + if remaining == 0 { + randomInt63, remaining = rng.rand.Int63(), maxAlphanumsPerInt + } + if idx := int(randomInt63 & alphanumsIdxMask); idx < len(alphanums) { + b[i] = alphanums[idx] + i++ + } + randomInt63 >>= alphanumsIdxBits + remaining-- + } + return string(b) +} + +// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and +// ensures that strings generated from hash functions appear consistent throughout the API. +func SafeEncodeString(s string) string { + r := make([]byte, len(s)) + for i, b := range []rune(s) { + r[i] = alphanums[(int(b) % len(alphanums))] + } + return string(r) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go new file mode 100644 index 0000000000..1fa351aab6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go @@ -0,0 +1,27 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uuid + +import ( + "github.com/google/uuid" + + "k8s.io/apimachinery/pkg/types" +) + +func NewUUID() types.UID { + return types.UID(uuid.New().String()) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go new file mode 100644 index 0000000000..a6f29cd7c4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +// Package waitgroup implements SafeWaitGroup wrap of sync.WaitGroup. +// Add with positive delta when waiting will fail, to prevent sync.WaitGroup race issue. +package waitgroup // import "k8s.io/apimachinery/pkg/util/waitgroup" diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go new file mode 100644 index 0000000000..e080a5e92f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package waitgroup + +import ( + "fmt" + "sync" +) + +// SafeWaitGroup must not be copied after first use. +type SafeWaitGroup struct { + wg sync.WaitGroup + mu sync.RWMutex + // wait indicate whether Wait is called, if true, + // then any Add with positive delta will return error. + wait bool +} + +// Add adds delta, which may be negative, similar to sync.WaitGroup. +// If Add with a positive delta happens after Wait, it will return error, +// which prevent unsafe Add. +func (wg *SafeWaitGroup) Add(delta int) error { + wg.mu.RLock() + defer wg.mu.RUnlock() + if wg.wait && delta > 0 { + return fmt.Errorf("add with positive delta after Wait is forbidden") + } + wg.wg.Add(delta) + return nil +} + +// Done decrements the WaitGroup counter. +func (wg *SafeWaitGroup) Done() { + wg.wg.Done() +} + +// Wait blocks until the WaitGroup counter is zero. +func (wg *SafeWaitGroup) Wait() { + wg.mu.Lock() + wg.wait = true + wg.mu.Unlock() + wg.wg.Wait() +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/attributes.go b/vendor/k8s.io/apiserver/pkg/admission/attributes.go new file mode 100644 index 0000000000..1d291f6b22 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/attributes.go @@ -0,0 +1,211 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" +) + +type attributesRecord struct { + kind schema.GroupVersionKind + namespace string + name string + resource schema.GroupVersionResource + subresource string + operation Operation + options runtime.Object + dryRun bool + object runtime.Object + oldObject runtime.Object + userInfo user.Info + + // other elements are always accessed in single goroutine. 
+ // But ValidatingAdmissionWebhook add annotations concurrently. + annotations map[string]annotation + annotationsLock sync.RWMutex + + reinvocationContext ReinvocationContext +} + +type annotation struct { + level auditinternal.Level + value string +} + +func NewAttributesRecord(object runtime.Object, oldObject runtime.Object, kind schema.GroupVersionKind, namespace, name string, resource schema.GroupVersionResource, subresource string, operation Operation, operationOptions runtime.Object, dryRun bool, userInfo user.Info) Attributes { + return &attributesRecord{ + kind: kind, + namespace: namespace, + name: name, + resource: resource, + subresource: subresource, + operation: operation, + options: operationOptions, + dryRun: dryRun, + object: object, + oldObject: oldObject, + userInfo: userInfo, + reinvocationContext: &reinvocationContext{}, + } +} + +func (record *attributesRecord) GetKind() schema.GroupVersionKind { + return record.kind +} + +func (record *attributesRecord) GetNamespace() string { + return record.namespace +} + +func (record *attributesRecord) GetName() string { + return record.name +} + +func (record *attributesRecord) GetResource() schema.GroupVersionResource { + return record.resource +} + +func (record *attributesRecord) GetSubresource() string { + return record.subresource +} + +func (record *attributesRecord) GetOperation() Operation { + return record.operation +} + +func (record *attributesRecord) GetOperationOptions() runtime.Object { + return record.options +} + +func (record *attributesRecord) IsDryRun() bool { + return record.dryRun +} + +func (record *attributesRecord) GetObject() runtime.Object { + return record.object +} + +func (record *attributesRecord) GetOldObject() runtime.Object { + return record.oldObject +} + +func (record *attributesRecord) GetUserInfo() user.Info { + return record.userInfo +} + +// getAnnotations implements privateAnnotationsGetter.It's a private method used +// by WithAudit decorator. 
+func (record *attributesRecord) getAnnotations(maxLevel auditinternal.Level) map[string]string { + record.annotationsLock.RLock() + defer record.annotationsLock.RUnlock() + + if record.annotations == nil { + return nil + } + cp := make(map[string]string, len(record.annotations)) + for key, value := range record.annotations { + if value.level.Less(maxLevel) || value.level == maxLevel { + cp[key] = value.value + } + } + return cp +} + +// AddAnnotation adds an annotation to attributesRecord with Metadata audit level +func (record *attributesRecord) AddAnnotation(key, value string) error { + return record.AddAnnotationWithLevel(key, value, auditinternal.LevelMetadata) +} + +func (record *attributesRecord) AddAnnotationWithLevel(key, value string, level auditinternal.Level) error { + if err := checkKeyFormat(key); err != nil { + return err + } + if level.Less(auditinternal.LevelMetadata) { + return fmt.Errorf("admission annotations are not allowed to be set at audit level lower than Metadata, key: %q, level: %s", key, level) + } + record.annotationsLock.Lock() + defer record.annotationsLock.Unlock() + + if record.annotations == nil { + record.annotations = make(map[string]annotation) + } + annotation := annotation{level: level, value: value} + if v, ok := record.annotations[key]; ok && v != annotation { + return fmt.Errorf("admission annotations are not allowd to be overwritten, key:%q, old value: %v, new value: %v", key, record.annotations[key], annotation) + } + record.annotations[key] = annotation + return nil +} + +func (record *attributesRecord) GetReinvocationContext() ReinvocationContext { + return record.reinvocationContext +} + +type reinvocationContext struct { + // isReinvoke is true when admission plugins are being reinvoked + isReinvoke bool + // reinvokeRequested is true when an admission plugin requested a re-invocation of the chain + reinvokeRequested bool + // values stores reinvoke context values per plugin. + values map[string]interface{} +} + +func (rc *reinvocationContext) IsReinvoke() bool { + return rc.isReinvoke +} + +func (rc *reinvocationContext) SetIsReinvoke() { + rc.isReinvoke = true +} + +func (rc *reinvocationContext) ShouldReinvoke() bool { + return rc.reinvokeRequested +} + +func (rc *reinvocationContext) SetShouldReinvoke() { + rc.reinvokeRequested = true +} + +func (rc *reinvocationContext) SetValue(plugin string, v interface{}) { + if rc.values == nil { + rc.values = map[string]interface{}{} + } + rc.values[plugin] = v +} + +func (rc *reinvocationContext) Value(plugin string) interface{} { + return rc.values[plugin] +} + +func checkKeyFormat(key string) error { + parts := strings.Split(key, "/") + if len(parts) != 2 { + return fmt.Errorf("annotation key has invalid format, the right format is a DNS subdomain prefix and '/' and key name. (e.g. 'podsecuritypolicy.admission.k8s.io/admit-policy')") + } + if msgs := validation.IsQualifiedName(key); len(msgs) != 0 { + return fmt.Errorf("annotation key has invalid format %s. A qualified name like 'podsecuritypolicy.admission.k8s.io/admit-policy' is required.", strings.Join(msgs, ",")) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/audit.go b/vendor/k8s.io/apiserver/pkg/admission/audit.go new file mode 100644 index 0000000000..d1e103cfc6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/audit.go @@ -0,0 +1,103 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "fmt" + + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" +) + +// auditHandler logs annotations set by other admission handlers +type auditHandler struct { + Interface + ae *auditinternal.Event +} + +var _ Interface = &auditHandler{} +var _ MutationInterface = &auditHandler{} +var _ ValidationInterface = &auditHandler{} + +// WithAudit is a decorator for a admission phase. It saves annotations +// of attribute into the audit event. Attributes passed to the Admit and +// Validate function must be instance of privateAnnotationsGetter or +// AnnotationsGetter, otherwise an error is returned. +func WithAudit(i Interface, ae *auditinternal.Event) Interface { + if i == nil { + return i + } + return &auditHandler{i, ae} +} + +func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if !handler.Interface.Handles(a.GetOperation()) { + return nil + } + if err := ensureAnnotationGetter(a); err != nil { + return err + } + var err error + if mutator, ok := handler.Interface.(MutationInterface); ok { + err = mutator.Admit(ctx, a, o) + handler.logAnnotations(a) + } + return err +} + +func (handler auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if !handler.Interface.Handles(a.GetOperation()) { + return nil + } + if err := ensureAnnotationGetter(a); err != nil { + return err + } + var err error + if validator, ok := handler.Interface.(ValidationInterface); ok { + err = validator.Validate(ctx, a, o) + handler.logAnnotations(a) + } + return err +} + +func ensureAnnotationGetter(a Attributes) error { + _, okPrivate := a.(privateAnnotationsGetter) + _, okPublic := a.(AnnotationsGetter) + if okPrivate || okPublic { + return nil + } + return fmt.Errorf("attributes must be an instance of privateAnnotationsGetter or AnnotationsGetter") +} + +func (handler auditHandler) logAnnotations(a Attributes) { + if handler.ae == nil { + return + } + switch a := a.(type) { + case privateAnnotationsGetter: + for key, value := range a.getAnnotations(handler.ae.Level) { + audit.LogAnnotation(handler.ae, key, value) + } + case AnnotationsGetter: + for key, value := range a.GetAnnotations(handler.ae.Level) { + audit.LogAnnotation(handler.ae, key, value) + } + default: + // this will never happen, because we have already checked it in ensureAnnotationGetter + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/chain.go b/vendor/k8s.io/apiserver/pkg/admission/chain.go new file mode 100644 index 0000000000..f2af01ef3c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/chain.go @@ -0,0 +1,70 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import "context" + +// chainAdmissionHandler is an instance of admission.NamedHandler that performs admission control using +// a chain of admission handlers +type chainAdmissionHandler []Interface + +// NewChainHandler creates a new chain handler from an array of handlers. Used for testing. +func NewChainHandler(handlers ...Interface) chainAdmissionHandler { + return chainAdmissionHandler(handlers) +} + +// Admit performs an admission control check using a chain of handlers, and returns immediately on first error +func (admissionHandler chainAdmissionHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { + for _, handler := range admissionHandler { + if !handler.Handles(a.GetOperation()) { + continue + } + if mutator, ok := handler.(MutationInterface); ok { + err := mutator.Admit(ctx, a, o) + if err != nil { + return err + } + } + } + return nil +} + +// Validate performs an admission control check using a chain of handlers, and returns immediately on first error +func (admissionHandler chainAdmissionHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { + for _, handler := range admissionHandler { + if !handler.Handles(a.GetOperation()) { + continue + } + if validator, ok := handler.(ValidationInterface); ok { + err := validator.Validate(ctx, a, o) + if err != nil { + return err + } + } + } + return nil +} + +// Handles will return true if any of the handlers handles the given operation +func (admissionHandler chainAdmissionHandler) Handles(operation Operation) bool { + for _, handler := range admissionHandler { + if handler.Handles(operation) { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/config.go b/vendor/k8s.io/apiserver/pkg/admission/config.go new file mode 100644 index 0000000000..43613321b9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -0,0 +1,175 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + + "k8s.io/klog/v2" + "sigs.k8s.io/yaml" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/apis/apiserver" + apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" +) + +func makeAbs(path, base string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + if len(base) == 0 || base == "." 
{ + cwd, err := os.Getwd() + if err != nil { + return "", err + } + base = cwd + } + return filepath.Join(base, path), nil +} + +// ReadAdmissionConfiguration reads the admission configuration at the specified path. +// It returns the loaded admission configuration if the input file aligns with the required syntax. +// If it does not align with the provided syntax, it returns a default configuration for the enumerated +// set of pluginNames whose config location references the specified configFilePath. +// It does this to preserve backward compatibility when admission control files were opaque. +// It returns an error if the file did not exist. +func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, configScheme *runtime.Scheme) (ConfigProvider, error) { + if configFilePath == "" { + return configProvider{config: &apiserver.AdmissionConfiguration{}}, nil + } + // a file was provided, so we just read it. + data, err := ioutil.ReadFile(configFilePath) + if err != nil { + return nil, fmt.Errorf("unable to read admission control configuration from %q [%v]", configFilePath, err) + } + codecs := serializer.NewCodecFactory(configScheme) + decoder := codecs.UniversalDecoder() + decodedObj, err := runtime.Decode(decoder, data) + // we were able to decode the file successfully + if err == nil { + decodedConfig, ok := decodedObj.(*apiserver.AdmissionConfiguration) + if !ok { + return nil, fmt.Errorf("unexpected type: %T", decodedObj) + } + baseDir := path.Dir(configFilePath) + for i := range decodedConfig.Plugins { + if decodedConfig.Plugins[i].Path == "" { + continue + } + // we update relative file paths to absolute paths + absPath, err := makeAbs(decodedConfig.Plugins[i].Path, baseDir) + if err != nil { + return nil, err + } + decodedConfig.Plugins[i].Path = absPath + } + return configProvider{ + config: decodedConfig, + }, nil + } + // we got an error where the decode wasn't related to a missing type + if !(runtime.IsMissingVersion(err) || runtime.IsMissingKind(err) || runtime.IsNotRegisteredError(err)) { + return nil, err + } + + // Only tolerate load errors if the file appears to be one of the two legacy plugin configs + unstructuredData := map[string]interface{}{} + if err2 := yaml.Unmarshal(data, &unstructuredData); err2 != nil { + return nil, err + } + _, isLegacyImagePolicy := unstructuredData["imagePolicy"] + _, isLegacyPodNodeSelector := unstructuredData["podNodeSelectorPluginConfig"] + if !isLegacyImagePolicy && !isLegacyPodNodeSelector { + return nil, err + } + + // convert the legacy format to the new admission control format + // in order to preserve backwards compatibility, we set plugins that + // previously read input from a non-versioned file configuration to the + // current input file. 
+ legacyPluginsWithUnversionedConfig := sets.NewString("ImagePolicyWebhook", "PodNodeSelector") + externalConfig := &apiserverv1.AdmissionConfiguration{} + for _, pluginName := range pluginNames { + if legacyPluginsWithUnversionedConfig.Has(pluginName) { + externalConfig.Plugins = append(externalConfig.Plugins, + apiserverv1.AdmissionPluginConfiguration{ + Name: pluginName, + Path: configFilePath}) + } + } + configScheme.Default(externalConfig) + internalConfig := &apiserver.AdmissionConfiguration{} + if err := configScheme.Convert(externalConfig, internalConfig, nil); err != nil { + return nil, err + } + return configProvider{ + config: internalConfig, + }, nil +} + +type configProvider struct { + config *apiserver.AdmissionConfiguration +} + +// GetAdmissionPluginConfigurationFor returns a reader that holds the admission plugin configuration. +func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration) (io.Reader, error) { + // if there is a nest object, return it directly + if pluginCfg.Configuration != nil { + return bytes.NewBuffer(pluginCfg.Configuration.Raw), nil + } + // there is nothing nested, so we delegate to path + if pluginCfg.Path != "" { + content, err := ioutil.ReadFile(pluginCfg.Path) + if err != nil { + klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) + return nil, err + } + return bytes.NewBuffer(content), nil + } + // there is no special config at all + return nil, nil +} + +// ConfigFor returns a reader for the specified plugin. +// If no specific configuration is present, we return a nil reader. +func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { + // there is no config, so there is no potential config + if p.config == nil { + return nil, nil + } + // look for matching plugin and get configuration + for _, pluginCfg := range p.config.Plugins { + if pluginName != pluginCfg.Name { + continue + } + pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg) + if err != nil { + return nil, err + } + return pluginConfig, nil + } + // there is no registered config that matches on plugin name. + return nil, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go new file mode 100644 index 0000000000..4c4bf74c92 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go @@ -0,0 +1,166 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package configuration + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + defaultInterval = 1 * time.Second + defaultFailureThreshold = 5 + defaultBootstrapRetries = 5 + defaultBootstrapGraceperiod = 5 * time.Second +) + +var ( + ErrNotReady = fmt.Errorf("configuration is not ready") + ErrDisabled = fmt.Errorf("disabled") +) + +type getFunc func() (runtime.Object, error) + +// When running, poller calls `get` every `interval`. If `get` is +// successful, `Ready()` returns ready and `configuration()` returns the +// `mergedConfiguration`; if `get` has failed more than `failureThreshold ` times, +// `Ready()` returns not ready and `configuration()` returns nil configuration. +// In an HA setup, the poller is consistent only if the `get` is +// doing consistent read. +type poller struct { + // a function to consistently read the latest configuration + get getFunc + // consistent read interval + // read-only + interval time.Duration + // if the number of consecutive read failure equals or exceeds the failureThreshold , the + // configuration is regarded as not ready. + // read-only + failureThreshold int + // number of consecutive failures so far. + failures int + // If the poller has passed the bootstrap phase. The poller is considered + // bootstrapped either bootstrapGracePeriod after the first call of + // configuration(), or when setConfigurationAndReady() is called, whichever + // comes first. + bootstrapped bool + // configuration() retries bootstrapRetries times if poller is not bootstrapped + // read-only + bootstrapRetries int + // Grace period for bootstrapping + // read-only + bootstrapGracePeriod time.Duration + once sync.Once + // if the configuration is regarded as ready. + ready bool + mergedConfiguration runtime.Object + lastErr error + // lock must be hold when reading/writing the data fields of poller. + lock sync.RWMutex +} + +func newPoller(get getFunc) *poller { + p := poller{ + get: get, + interval: defaultInterval, + failureThreshold: defaultFailureThreshold, + bootstrapRetries: defaultBootstrapRetries, + bootstrapGracePeriod: defaultBootstrapGraceperiod, + } + return &p +} + +func (a *poller) lastError(err error) { + a.lock.Lock() + defer a.lock.Unlock() + a.lastErr = err +} + +func (a *poller) notReady() { + a.lock.Lock() + defer a.lock.Unlock() + a.ready = false +} + +func (a *poller) bootstrapping() { + // bootstrapGracePeriod is read-only, so no lock is required + timer := time.NewTimer(a.bootstrapGracePeriod) + go func() { + defer timer.Stop() + <-timer.C + a.lock.Lock() + defer a.lock.Unlock() + a.bootstrapped = true + }() +} + +// If the poller is not bootstrapped yet, the configuration() gets a few chances +// to retry. This hides transient failures during system startup. 
+func (a *poller) configuration() (runtime.Object, error) { + a.once.Do(a.bootstrapping) + a.lock.RLock() + defer a.lock.RUnlock() + retries := 1 + if !a.bootstrapped { + retries = a.bootstrapRetries + } + for count := 0; count < retries; count++ { + if count > 0 { + a.lock.RUnlock() + time.Sleep(a.interval) + a.lock.RLock() + } + if a.ready { + return a.mergedConfiguration, nil + } + } + if a.lastErr != nil { + return nil, a.lastErr + } + return nil, ErrNotReady +} + +func (a *poller) setConfigurationAndReady(value runtime.Object) { + a.lock.Lock() + defer a.lock.Unlock() + a.bootstrapped = true + a.mergedConfiguration = value + a.ready = true + a.lastErr = nil +} + +func (a *poller) Run(stopCh <-chan struct{}) { + go wait.Until(a.sync, a.interval, stopCh) +} + +func (a *poller) sync() { + configuration, err := a.get() + if err != nil { + a.failures++ + a.lastError(err) + if a.failures >= a.failureThreshold { + a.notReady() + } + return + } + a.failures = 0 + a.setConfigurationAndReady(configuration) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go new file mode 100644 index 0000000000..d9b28ad785 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "fmt" + "sort" + "sync/atomic" + + "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + "k8s.io/client-go/informers" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" + "k8s.io/client-go/tools/cache" +) + +// mutatingWebhookConfigurationManager collects the mutating webhook objects so that they can be called. 
+type mutatingWebhookConfigurationManager struct { + configuration *atomic.Value + lister admissionregistrationlisters.MutatingWebhookConfigurationLister + hasSynced func() bool +} + +var _ generic.Source = &mutatingWebhookConfigurationManager{} + +func NewMutatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { + informer := f.Admissionregistration().V1().MutatingWebhookConfigurations() + manager := &mutatingWebhookConfigurationManager{ + configuration: &atomic.Value{}, + lister: informer.Lister(), + hasSynced: informer.Informer().HasSynced, + } + + // Start with an empty list + manager.configuration.Store([]webhook.WebhookAccessor{}) + + // On any change, rebuild the config + informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.updateConfiguration() }, + UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, + DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + }) + + return manager +} + +// Webhooks returns the merged MutatingWebhookConfiguration. +func (m *mutatingWebhookConfigurationManager) Webhooks() []webhook.WebhookAccessor { + return m.configuration.Load().([]webhook.WebhookAccessor) +} + +func (m *mutatingWebhookConfigurationManager) HasSynced() bool { + return m.hasSynced() +} + +func (m *mutatingWebhookConfigurationManager) updateConfiguration() { + configurations, err := m.lister.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) + return + } + m.configuration.Store(mergeMutatingWebhookConfigurations(configurations)) +} + +func mergeMutatingWebhookConfigurations(configurations []*v1.MutatingWebhookConfiguration) []webhook.WebhookAccessor { + // The internal order of webhooks for each configuration is provided by the user + // but configurations themselves can be in any order. As we are going to run these + // webhooks in serial, they are sorted here to have a deterministic order. + sort.SliceStable(configurations, MutatingWebhookConfigurationSorter(configurations).ByName) + accessors := []webhook.WebhookAccessor{} + for _, c := range configurations { + // webhook names are not validated for uniqueness, so we check for duplicates and + // add a int suffix to distinguish between them + names := map[string]int{} + for i := range c.Webhooks { + n := c.Webhooks[i].Name + uid := fmt.Sprintf("%s/%s/%d", c.Name, n, names[n]) + names[n]++ + accessors = append(accessors, webhook.NewMutatingWebhookAccessor(uid, c.Name, &c.Webhooks[i])) + } + } + return accessors +} + +type MutatingWebhookConfigurationSorter []*v1.MutatingWebhookConfiguration + +func (a MutatingWebhookConfigurationSorter) ByName(i, j int) bool { + return a[i].Name < a[j].Name +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go new file mode 100644 index 0000000000..37062b082e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "fmt" + "sort" + "sync/atomic" + + "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + "k8s.io/client-go/informers" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" + "k8s.io/client-go/tools/cache" +) + +// validatingWebhookConfigurationManager collects the validating webhook objects so that they can be called. +type validatingWebhookConfigurationManager struct { + configuration *atomic.Value + lister admissionregistrationlisters.ValidatingWebhookConfigurationLister + hasSynced func() bool +} + +var _ generic.Source = &validatingWebhookConfigurationManager{} + +func NewValidatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { + informer := f.Admissionregistration().V1().ValidatingWebhookConfigurations() + manager := &validatingWebhookConfigurationManager{ + configuration: &atomic.Value{}, + lister: informer.Lister(), + hasSynced: informer.Informer().HasSynced, + } + + // Start with an empty list + manager.configuration.Store([]webhook.WebhookAccessor{}) + + // On any change, rebuild the config + informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.updateConfiguration() }, + UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, + DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + }) + + return manager +} + +// Webhooks returns the merged ValidatingWebhookConfiguration. +func (v *validatingWebhookConfigurationManager) Webhooks() []webhook.WebhookAccessor { + return v.configuration.Load().([]webhook.WebhookAccessor) +} + +// HasSynced returns true if the shared informers have synced. 
+func (v *validatingWebhookConfigurationManager) HasSynced() bool { + return v.hasSynced() +} + +func (v *validatingWebhookConfigurationManager) updateConfiguration() { + configurations, err := v.lister.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) + return + } + v.configuration.Store(mergeValidatingWebhookConfigurations(configurations)) +} + +func mergeValidatingWebhookConfigurations(configurations []*v1.ValidatingWebhookConfiguration) []webhook.WebhookAccessor { + sort.SliceStable(configurations, ValidatingWebhookConfigurationSorter(configurations).ByName) + accessors := []webhook.WebhookAccessor{} + for _, c := range configurations { + // webhook names are not validated for uniqueness, so we check for duplicates and + // add a int suffix to distinguish between them + names := map[string]int{} + for i := range c.Webhooks { + n := c.Webhooks[i].Name + uid := fmt.Sprintf("%s/%s/%d", c.Name, n, names[n]) + names[n]++ + accessors = append(accessors, webhook.NewValidatingWebhookAccessor(uid, c.Name, &c.Webhooks[i])) + } + } + return accessors +} + +type ValidatingWebhookConfigurationSorter []*v1.ValidatingWebhookConfiguration + +func (a ValidatingWebhookConfigurationSorter) ByName(i, j int) bool { + return a[i].Name < a[j].Name +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/decorator.go b/vendor/k8s.io/apiserver/pkg/admission/decorator.go new file mode 100644 index 0000000000..a4b0b28b55 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/decorator.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +type Decorator interface { + Decorate(handler Interface, name string) Interface +} + +type DecoratorFunc func(handler Interface, name string) Interface + +func (d DecoratorFunc) Decorate(handler Interface, name string) Interface { + return d(handler, name) +} + +type Decorators []Decorator + +// Decorate applies the decorator in inside-out order, i.e. the first decorator in the slice is first applied to the given handler. +func (d Decorators) Decorate(handler Interface, name string) Interface { + result := handler + for _, d := range d { + result = d.Decorate(result, name) + } + + return result +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/errors.go b/vendor/k8s.io/apiserver/pkg/admission/errors.go new file mode 100644 index 0000000000..9a069a2c99 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/errors.go @@ -0,0 +1,72 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +func extractResourceName(a Attributes) (name string, resource schema.GroupResource, err error) { + resource = a.GetResource().GroupResource() + + if len(a.GetName()) > 0 { + return a.GetName(), resource, nil + } + + name = "Unknown" + obj := a.GetObject() + if obj != nil { + accessor, err := meta.Accessor(obj) + if err != nil { + // not all object have ObjectMeta. If we don't, return a name with a slash (always illegal) + return "Unknown/errorGettingName", resource, nil + } + + // this is necessary because name object name generation has not occurred yet + if len(accessor.GetName()) > 0 { + name = accessor.GetName() + } else if len(accessor.GetGenerateName()) > 0 { + name = accessor.GetGenerateName() + } + } + return name, resource, nil +} + +// NewForbidden is a utility function to return a well-formatted admission control error response +func NewForbidden(a Attributes, internalError error) error { + // do not double wrap an error of same type + if apierrors.IsForbidden(internalError) { + return internalError + } + name, resource, err := extractResourceName(a) + if err != nil { + return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err})) + } + return apierrors.NewForbidden(resource, name, internalError) +} + +// NewNotFound is a utility function to return a well-formatted admission control error response +func NewNotFound(a Attributes) error { + name, resource, err := extractResourceName(a) + if err != nil { + return apierrors.NewInternalError(err) + } + return apierrors.NewNotFound(resource, name) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/handler.go b/vendor/k8s.io/apiserver/pkg/admission/handler.go new file mode 100644 index 0000000000..d2a9e7d4cf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/handler.go @@ -0,0 +1,79 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/sets" +) + +const ( + // timeToWaitForReady is the amount of time to wait to let an admission controller to be ready to satisfy a request. + // this is useful when admission controllers need to warm their caches before letting requests through. + timeToWaitForReady = 10 * time.Second +) + +// ReadyFunc is a function that returns true if the admission controller is ready to handle requests. 
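+//
+// A shared informer's HasSynced method has exactly this shape, so a typical
+// wiring (an illustrative sketch, mirroring the NamespaceLifecycle plugin
+// vendored later in this patch) is:
+//
+//     handler.SetReadyFunc(namespaceInformer.Informer().HasSynced)
+//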
+type ReadyFunc func() bool + +// Handler is a base for admission control handlers that +// support a predefined set of operations +type Handler struct { + operations sets.String + readyFunc ReadyFunc +} + +// Handles returns true for methods that this handler supports +func (h *Handler) Handles(operation Operation) bool { + return h.operations.Has(string(operation)) +} + +// NewHandler creates a new base handler that handles the passed +// in operations +func NewHandler(ops ...Operation) *Handler { + operations := sets.NewString() + for _, op := range ops { + operations.Insert(string(op)) + } + return &Handler{ + operations: operations, + } +} + +// SetReadyFunc allows late registration of a ReadyFunc to know if the handler is ready to process requests. +func (h *Handler) SetReadyFunc(readyFunc ReadyFunc) { + h.readyFunc = readyFunc +} + +// WaitForReady will wait for the readyFunc (if registered) to return ready, and in case of timeout, will return false. +func (h *Handler) WaitForReady() bool { + // there is no ready func configured, so we return immediately + if h.readyFunc == nil { + return true + } + + timeout := time.After(timeToWaitForReady) + for !h.readyFunc() { + select { + case <-time.After(100 * time.Millisecond): + case <-timeout: + return h.readyFunc() + } + } + return true +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go b/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go new file mode 100644 index 0000000000..613baf8efa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package initializer + +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" +) + +type pluginInitializer struct { + externalClient kubernetes.Interface + externalInformers informers.SharedInformerFactory + authorizer authorizer.Authorizer + featureGates featuregate.FeatureGate +} + +// New creates an instance of admission plugins initializer. +// This constructor is public with a long param list so that callers immediately know that new information can be expected +// during compilation when they update a level. 
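+//
+// A hypothetical call site (the argument names are assumptions, not part of
+// this package):
+//
+//     genericInitializer := initializer.New(kubeClient, sharedInformers, authz, featureGates)
+//     genericInitializer.Initialize(somePlugin)
+//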
+func New( + extClientset kubernetes.Interface, + extInformers informers.SharedInformerFactory, + authz authorizer.Authorizer, + featureGates featuregate.FeatureGate, +) pluginInitializer { + return pluginInitializer{ + externalClient: extClientset, + externalInformers: extInformers, + authorizer: authz, + featureGates: featureGates, + } +} + +// Initialize checks the initialization interfaces implemented by a plugin +// and provide the appropriate initialization data +func (i pluginInitializer) Initialize(plugin admission.Interface) { + // First tell the plugin about enabled features, so it can decide whether to start informers or not + if wants, ok := plugin.(WantsFeatures); ok { + wants.InspectFeatureGates(i.featureGates) + } + + if wants, ok := plugin.(WantsExternalKubeClientSet); ok { + wants.SetExternalKubeClientSet(i.externalClient) + } + + if wants, ok := plugin.(WantsExternalKubeInformerFactory); ok { + wants.SetExternalKubeInformerFactory(i.externalInformers) + } + + if wants, ok := plugin.(WantsAuthorizer); ok { + wants.SetAuthorizer(i.authorizer) + } +} + +var _ admission.PluginInitializer = pluginInitializer{} diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go new file mode 100644 index 0000000000..86a6df1c2c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package initializer + +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + quota "k8s.io/apiserver/pkg/quota/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" +) + +// WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it +type WantsExternalKubeClientSet interface { + SetExternalKubeClientSet(kubernetes.Interface) + admission.InitializationValidator +} + +// WantsExternalKubeInformerFactory defines a function which sets InformerFactory for admission plugins that need it +type WantsExternalKubeInformerFactory interface { + SetExternalKubeInformerFactory(informers.SharedInformerFactory) + admission.InitializationValidator +} + +// WantsAuthorizer defines a function which sets Authorizer for admission plugins that need it. +type WantsAuthorizer interface { + SetAuthorizer(authorizer.Authorizer) + admission.InitializationValidator +} + +// WantsQuotaConfiguration defines a function which sets quota configuration for admission plugins that need it. +type WantsQuotaConfiguration interface { + SetQuotaConfiguration(quota.Configuration) + admission.InitializationValidator +} + +// WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin. +// Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one +// and assign it to a simple bool in the admission plugin struct. 
+// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){ +// a.myFeatureIsOn = features.Enabled("my-feature") +// } +type WantsFeatures interface { + InspectFeatureGates(featuregate.FeatureGate) + admission.InitializationValidator +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/interfaces.go new file mode 100644 index 0000000000..8882680b2c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/interfaces.go @@ -0,0 +1,172 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" +) + +// Attributes is an interface used by AdmissionController to get information about a request +// that is used to make an admission decision. +type Attributes interface { + // GetName returns the name of the object as presented in the request. On a CREATE operation, the client + // may omit name and rely on the server to generate the name. If that is the case, this method will return + // the empty string + GetName() string + // GetNamespace is the namespace associated with the request (if any) + GetNamespace() string + // GetResource is the name of the resource being requested. This is not the kind. For example: pods + GetResource() schema.GroupVersionResource + // GetSubresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. + // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" + // (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding". + GetSubresource() string + // GetOperation is the operation being performed + GetOperation() Operation + // GetOperationOptions is the options for the operation being performed + GetOperationOptions() runtime.Object + // IsDryRun indicates that modifications will definitely not be persisted for this request. This is to prevent + // admission controllers with side effects and a method of reconciliation from being overwhelmed. + // However, a value of false for this does not mean that the modification will be persisted, because it + // could still be rejected by a subsequent validation step. + IsDryRun() bool + // GetObject is the object from the incoming request prior to default values being applied + GetObject() runtime.Object + // GetOldObject is the existing object. Only populated for UPDATE requests. + GetOldObject() runtime.Object + // GetKind is the type of object being manipulated. 
For example: Pod + GetKind() schema.GroupVersionKind + // GetUserInfo is information about the requesting user + GetUserInfo() user.Info + + // AddAnnotation sets annotation according to key-value pair. The key should be qualified, e.g., podsecuritypolicy.admission.k8s.io/admit-policy, where + // "podsecuritypolicy" is the name of the plugin, "admission.k8s.io" is the name of the organization, "admit-policy" is the key name. + // An error is returned if the format of key is invalid. When trying to overwrite annotation with a new value, an error is returned. + // Both ValidationInterface and MutationInterface are allowed to add Annotations. + // By default, an annotation gets logged into audit event if the request's audit level is greater or + // equal to Metadata. + AddAnnotation(key, value string) error + + // AddAnnotationWithLevel sets annotation according to key-value pair with additional intended audit level. + // An Annotation gets logged into audit event if the request's audit level is greater or equal to the + // intended audit level. + AddAnnotationWithLevel(key, value string, level auditinternal.Level) error + + // GetReinvocationContext tracks the admission request information relevant to the re-invocation policy. + GetReinvocationContext() ReinvocationContext +} + +// ObjectInterfaces is an interface used by AdmissionController to get object interfaces +// such as Converter or Defaulter. These interfaces are normally coming from Request Scope +// to handle special cases like CRDs. +type ObjectInterfaces interface { + // GetObjectCreater is the ObjectCreator appropriate for the requested object. + GetObjectCreater() runtime.ObjectCreater + // GetObjectTyper is the ObjectTyper appropriate for the requested object. + GetObjectTyper() runtime.ObjectTyper + // GetObjectDefaulter is the ObjectDefaulter appropriate for the requested object. + GetObjectDefaulter() runtime.ObjectDefaulter + // GetObjectConvertor is the ObjectConvertor appropriate for the requested object. + GetObjectConvertor() runtime.ObjectConvertor + // GetEquivalentResourceMapper is the EquivalentResourceMapper appropriate for finding equivalent resources and expected kind for the requested object. + GetEquivalentResourceMapper() runtime.EquivalentResourceMapper +} + +// privateAnnotationsGetter is a private interface which allows users to get annotations from Attributes. +type privateAnnotationsGetter interface { + getAnnotations(maxLevel auditinternal.Level) map[string]string +} + +// AnnotationsGetter allows users to get annotations from Attributes. An alternate Attribute should implement +// this interface. +type AnnotationsGetter interface { + GetAnnotations(maxLevel auditinternal.Level) map[string]string +} + +// ReinvocationContext provides access to the admission related state required to implement the re-invocation policy. +type ReinvocationContext interface { + // IsReinvoke returns true if the current admission check is a re-invocation. + IsReinvoke() bool + // SetIsReinvoke sets the current admission check as a re-invocation. + SetIsReinvoke() + // ShouldReinvoke returns true if any plugin has requested a re-invocation. + ShouldReinvoke() bool + // SetShouldReinvoke signals that a re-invocation is desired. + SetShouldReinvoke() + // AddValue set a value for a plugin name, possibly overriding a previous value. + SetValue(plugin string, v interface{}) + // Value reads a value for a webhook. 
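+	//
+	// Illustrative, hypothetical use from a plugin (attribute and state names
+	// are assumptions):
+	//
+	//     rc := attributes.GetReinvocationContext()
+	//     if !rc.IsReinvoke() {
+	//         rc.SetValue("example-plugin", pluginState)
+	//     }
+	//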
+ Value(plugin string) interface{} +} + +// Interface is an abstract, pluggable interface for Admission Control decisions. +type Interface interface { + // Handles returns true if this admission controller can handle the given operation + // where operation can be one of CREATE, UPDATE, DELETE, or CONNECT + Handles(operation Operation) bool +} + +type MutationInterface interface { + Interface + + // Admit makes an admission decision based on the request attributes. + // Context is used only for timeout/deadline/cancellation and tracing information. + Admit(ctx context.Context, a Attributes, o ObjectInterfaces) (err error) +} + +// ValidationInterface is an abstract, pluggable interface for Admission Control decisions. +type ValidationInterface interface { + Interface + + // Validate makes an admission decision based on the request attributes. It is NOT allowed to mutate + // Context is used only for timeout/deadline/cancellation and tracing information. + Validate(ctx context.Context, a Attributes, o ObjectInterfaces) (err error) +} + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) + +// PluginInitializer is used for initialization of shareable resources between admission plugins. +// After initialization the resources have to be set separately +type PluginInitializer interface { + Initialize(plugin Interface) +} + +// InitializationValidator holds ValidateInitialization functions, which are responsible for validation of initialized +// shared resources and should be implemented on admission plugins +type InitializationValidator interface { + ValidateInitialization() error +} + +// ConfigProvider provides a way to get configuration for an admission plugin based on its name +type ConfigProvider interface { + ConfigFor(pluginName string) (io.Reader, error) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go new file mode 100644 index 0000000000..c9edb48b41 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go @@ -0,0 +1,251 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "fmt" + "strconv" + "time" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// WebhookRejectionErrorType defines different error types that happen in a webhook rejection. 
+type WebhookRejectionErrorType string + +const ( + namespace = "apiserver" + subsystem = "admission" + + // WebhookRejectionCallingWebhookError identifies a calling webhook error which causes + // a webhook admission to reject a request + WebhookRejectionCallingWebhookError WebhookRejectionErrorType = "calling_webhook_error" + // WebhookRejectionAPIServerInternalError identifies an apiserver internal error which + // causes a webhook admission to reject a request + WebhookRejectionAPIServerInternalError WebhookRejectionErrorType = "apiserver_internal_error" + // WebhookRejectionNoError identifies a webhook properly rejected a request + WebhookRejectionNoError WebhookRejectionErrorType = "no_error" +) + +var ( + // Use buckets ranging from 5 ms to 2.5 seconds (admission webhooks timeout at 30 seconds by default). + latencyBuckets = []float64{0.005, 0.025, 0.1, 0.5, 2.5} + latencySummaryMaxAge = 5 * time.Hour + + // Metrics provides access to all admission metrics. + Metrics = newAdmissionMetrics() +) + +// ObserverFunc is a func that emits metrics. +type ObserverFunc func(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) + +const ( + stepValidate = "validate" + stepAdmit = "admit" +) + +// WithControllerMetrics is a decorator for named admission handlers. +func WithControllerMetrics(i admission.Interface, name string) admission.Interface { + return WithMetrics(i, Metrics.ObserveAdmissionController, name) +} + +// WithStepMetrics is a decorator for a whole admission phase, i.e. admit or validation.admission step. +func WithStepMetrics(i admission.Interface) admission.Interface { + return WithMetrics(i, Metrics.ObserveAdmissionStep) +} + +// WithMetrics is a decorator for admission handlers with a generic observer func. +func WithMetrics(i admission.Interface, observer ObserverFunc, extraLabels ...string) admission.Interface { + return &pluginHandlerWithMetrics{ + Interface: i, + observer: observer, + extraLabels: extraLabels, + } +} + +// pluginHandlerWithMetrics decorates a admission handler with metrics. +type pluginHandlerWithMetrics struct { + admission.Interface + observer ObserverFunc + extraLabels []string +} + +// Admit performs a mutating admission control check and emit metrics. +func (p pluginHandlerWithMetrics) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + mutatingHandler, ok := p.Interface.(admission.MutationInterface) + if !ok { + return nil + } + + start := time.Now() + err := mutatingHandler.Admit(ctx, a, o) + p.observer(time.Since(start), err != nil, a, stepAdmit, p.extraLabels...) + return err +} + +// Validate performs a non-mutating admission control check and emits metrics. +func (p pluginHandlerWithMetrics) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + validatingHandler, ok := p.Interface.(admission.ValidationInterface) + if !ok { + return nil + } + + start := time.Now() + err := validatingHandler.Validate(ctx, a, o) + p.observer(time.Since(start), err != nil, a, stepValidate, p.extraLabels...) + return err +} + +// AdmissionMetrics instruments admission with prometheus metrics. +type AdmissionMetrics struct { + step *metricSet + controller *metricSet + webhook *metricSet + webhookRejection *metrics.CounterVec +} + +// newAdmissionMetrics create a new AdmissionMetrics, configured with default metric names. +func newAdmissionMetrics() *AdmissionMetrics { + // Admission metrics for a step of the admission flow. 
The entire admission flow is broken down into a series of steps + // Each step is identified by a distinct type label value. + step := newMetricSet("step", + []string{"type", "operation", "rejected"}, + "Admission sub-step %s, broken out for each operation and API resource and step type (validate or admit).", true) + + // Built-in admission controller metrics. Each admission controller is identified by name. + controller := newMetricSet("controller", + []string{"name", "type", "operation", "rejected"}, + "Admission controller %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) + + // Admission webhook metrics. Each webhook is identified by name. + webhook := newMetricSet("webhook", + []string{"name", "type", "operation", "rejected"}, + "Admission webhook %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) + + webhookRejection := metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "webhook_rejection_count", + Help: "Admission webhook rejection count, identified by name and broken out for each admission type (validating or admit) and operation. Additional labels specify an error type (calling_webhook_error or apiserver_internal_error if an error occurred; no_error otherwise) and optionally a non-zero rejection code if the webhook rejects the request with an HTTP status code (honored by the apiserver when the code is greater or equal to 400). Codes greater than 600 are truncated to 600, to keep the metrics cardinality bounded.", + StabilityLevel: metrics.ALPHA, + }, + []string{"name", "type", "operation", "error_type", "rejection_code"}) + + step.mustRegister() + controller.mustRegister() + webhook.mustRegister() + legacyregistry.MustRegister(webhookRejection) + return &AdmissionMetrics{step: step, controller: controller, webhook: webhook, webhookRejection: webhookRejection} +} + +func (m *AdmissionMetrics) reset() { + m.step.reset() + m.controller.reset() + m.webhook.reset() +} + +// ObserveAdmissionStep records admission related metrics for a admission step, identified by step type. +func (m *AdmissionMetrics) ObserveAdmissionStep(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { + m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) +} + +// ObserveAdmissionController records admission related metrics for a built-in admission controller, identified by it's plugin handler name. +func (m *AdmissionMetrics) ObserveAdmissionController(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { + m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) +} + +// ObserveWebhook records admission related metrics for a admission webhook. +func (m *AdmissionMetrics) ObserveWebhook(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { + m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) +} + +// ObserveWebhookRejection records admission related metrics for an admission webhook rejection. 
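+//
+// An illustrative, hypothetical call from a webhook dispatcher (the webhook
+// name is an assumption):
+//
+//     Metrics.ObserveWebhookRejection("example.webhook.io", "validating", "CREATE",
+//         WebhookRejectionCallingWebhookError, 0)
+//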
+func (m *AdmissionMetrics) ObserveWebhookRejection(name, stepType, operation string, errorType WebhookRejectionErrorType, rejectionCode int) { + // We truncate codes greater than 600 to keep the cardinality bounded. + // This should be rarely done by a malfunctioning webhook server. + if rejectionCode > 600 { + rejectionCode = 600 + } + m.webhookRejection.WithLabelValues(name, stepType, operation, string(errorType), strconv.Itoa(rejectionCode)).Inc() +} + +type metricSet struct { + latencies *metrics.HistogramVec + latenciesSummary *metrics.SummaryVec +} + +func newMetricSet(name string, labels []string, helpTemplate string, hasSummary bool) *metricSet { + var summary *metrics.SummaryVec + if hasSummary { + summary = metrics.NewSummaryVec( + &metrics.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: fmt.Sprintf("%s_admission_duration_seconds_summary", name), + Help: fmt.Sprintf(helpTemplate, "latency summary in seconds"), + MaxAge: latencySummaryMaxAge, + StabilityLevel: metrics.ALPHA, + }, + labels, + ) + } + + return &metricSet{ + latencies: metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: fmt.Sprintf("%s_admission_duration_seconds", name), + Help: fmt.Sprintf(helpTemplate, "latency histogram in seconds"), + Buckets: latencyBuckets, + StabilityLevel: metrics.ALPHA, + }, + labels, + ), + + latenciesSummary: summary, + } +} + +// MustRegister registers all the prometheus metrics in the metricSet. +func (m *metricSet) mustRegister() { + legacyregistry.MustRegister(m.latencies) + if m.latenciesSummary != nil { + legacyregistry.MustRegister(m.latenciesSummary) + } +} + +// Reset resets all the prometheus metrics in the metricSet. +func (m *metricSet) reset() { + m.latencies.Reset() + if m.latenciesSummary != nil { + m.latenciesSummary.Reset() + } +} + +// Observe records an observed admission event to all metrics in the metricSet. +func (m *metricSet) observe(elapsed time.Duration, labels ...string) { + elapsedSeconds := elapsed.Seconds() + m.latencies.WithLabelValues(labels...).Observe(elapsedSeconds) + if m.latenciesSummary != nil { + m.latenciesSummary.WithLabelValues(labels...).Observe(elapsedSeconds) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go new file mode 100644 index 0000000000..0fac569c4f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -0,0 +1,232 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lifecycle + +import ( + "context" + "fmt" + "io" + "time" + + "k8s.io/klog/v2" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + utilcache "k8s.io/apimachinery/pkg/util/cache" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" +) + +const ( + // PluginName indicates the name of admission plug-in + PluginName = "NamespaceLifecycle" + // how long a namespace stays in the force live lookup cache before expiration. + forceLiveLookupTTL = 30 * time.Second + // how long to wait for a missing namespace before re-checking the cache (and then doing a live lookup) + // this accomplishes two things: + // 1. It allows a watch-fed cache time to observe a namespace creation event + // 2. It allows time for a namespace creation to distribute to members of a storage cluster, + // so the live lookup has a better chance of succeeding even if it isn't performed against the leader. + missingNamespaceWait = 50 * time.Millisecond +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem, metav1.NamespacePublic)) + }) +} + +// Lifecycle is an implementation of admission.Interface. +// It enforces life-cycle constraints around a Namespace depending on its Phase +type Lifecycle struct { + *admission.Handler + client kubernetes.Interface + immortalNamespaces sets.String + namespaceLister corelisters.NamespaceLister + // forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache. + // if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server. + forceLiveLookupCache *utilcache.LRUExpireCache +} + +var _ = initializer.WantsExternalKubeInformerFactory(&Lifecycle{}) +var _ = initializer.WantsExternalKubeClientSet(&Lifecycle{}) + +// Admit makes an admission decision based on the request attributes +func (l *Lifecycle) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + // prevent deletion of immortal namespaces + if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() && l.immortalNamespaces.Has(a.GetName()) { + return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted")) + } + + // always allow non-namespaced resources + if len(a.GetNamespace()) == 0 && a.GetKind().GroupKind() != v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() { + return nil + } + + if a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() { + // if a namespace is deleted, we want to prevent all further creates into it + // while it is undergoing termination. to reduce incidences where the cache + // is slow to update, we add the namespace into a force live lookup list to ensure + // we are not looking at stale state. 
+ if a.GetOperation() == admission.Delete { + l.forceLiveLookupCache.Add(a.GetName(), true, forceLiveLookupTTL) + } + // allow all operations to namespaces + return nil + } + + // always allow deletion of other resources + if a.GetOperation() == admission.Delete { + return nil + } + + // always allow access review checks. Returning status about the namespace would be leaking information + if isAccessReview(a) { + return nil + } + + // we need to wait for our caches to warm + if !l.WaitForReady() { + return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) + } + + var ( + exists bool + err error + ) + + namespace, err := l.namespaceLister.Get(a.GetNamespace()) + if err != nil { + if !errors.IsNotFound(err) { + return errors.NewInternalError(err) + } + } else { + exists = true + } + + if !exists && a.GetOperation() == admission.Create { + // give the cache time to observe the namespace before rejecting a create. + // this helps when creating a namespace and immediately creating objects within it. + time.Sleep(missingNamespaceWait) + namespace, err = l.namespaceLister.Get(a.GetNamespace()) + switch { + case errors.IsNotFound(err): + // no-op + case err != nil: + return errors.NewInternalError(err) + default: + exists = true + } + if exists { + klog.V(4).Infof("found %s in cache after waiting", a.GetNamespace()) + } + } + + // forceLiveLookup if true will skip looking at local cache state and instead always make a live call to server. + forceLiveLookup := false + if _, ok := l.forceLiveLookupCache.Get(a.GetNamespace()); ok { + // we think the namespace was marked for deletion, but our current local cache says otherwise, we will force a live lookup. + forceLiveLookup = exists && namespace.Status.Phase == v1.NamespaceActive + } + + // refuse to operate on non-existent namespaces + if !exists || forceLiveLookup { + // as a last resort, make a call directly to storage + namespace, err = l.client.CoreV1().Namespaces().Get(context.TODO(), a.GetNamespace(), metav1.GetOptions{}) + switch { + case errors.IsNotFound(err): + return err + case err != nil: + return errors.NewInternalError(err) + } + klog.V(4).Infof("found %s via storage lookup", a.GetNamespace()) + } + + // ensure that we're not trying to create objects in terminating namespaces + if a.GetOperation() == admission.Create { + if namespace.Status.Phase != v1.NamespaceTerminating { + return nil + } + + err := admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated", a.GetNamespace())) + if apierr, ok := err.(*errors.StatusError); ok { + apierr.ErrStatus.Details.Causes = append(apierr.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: v1.NamespaceTerminatingCause, + Message: fmt.Sprintf("namespace %s is being terminated", a.GetNamespace()), + Field: "metadata.namespace", + }) + } + return err + } + + return nil +} + +// NewLifecycle creates a new namespace Lifecycle admission control handler +func NewLifecycle(immortalNamespaces sets.String) (*Lifecycle, error) { + return newLifecycleWithClock(immortalNamespaces, clock.RealClock{}) +} + +func newLifecycleWithClock(immortalNamespaces sets.String, clock utilcache.Clock) (*Lifecycle, error) { + forceLiveLookupCache := utilcache.NewLRUExpireCacheWithClock(100, clock) + return &Lifecycle{ + Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete), + immortalNamespaces: immortalNamespaces, + forceLiveLookupCache: forceLiveLookupCache, + }, nil +} + +// SetExternalKubeInformerFactory 
implements the WantsExternalKubeInformerFactory interface. +func (l *Lifecycle) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + namespaceInformer := f.Core().V1().Namespaces() + l.namespaceLister = namespaceInformer.Lister() + l.SetReadyFunc(namespaceInformer.Informer().HasSynced) +} + +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. +func (l *Lifecycle) SetExternalKubeClientSet(client kubernetes.Interface) { + l.client = client +} + +// ValidateInitialization implements the InitializationValidator interface. +func (l *Lifecycle) ValidateInitialization() error { + if l.namespaceLister == nil { + return fmt.Errorf("missing namespaceLister") + } + if l.client == nil { + return fmt.Errorf("missing client") + } + return nil +} + +// accessReviewResources are resources which give a view into permissions in a namespace. Users must be allowed to create these +// resources because returning "not found" errors allows someone to search for the "people I'm going to fire in 2017" namespace. +var accessReviewResources = map[schema.GroupResource]bool{ + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true, +} + +func isAccessReview(a admission.Attributes) bool { + return accessReviewResources[a.GetResource().GroupResource()] +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go new file mode 100644 index 0000000000..0cbc21c826 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go @@ -0,0 +1,297 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "sync" + + "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + webhookutil "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/rest" +) + +// WebhookAccessor provides a common interface to both mutating and validating webhook types. +type WebhookAccessor interface { + // GetUID gets a string that uniquely identifies the webhook. + GetUID() string + + // GetConfigurationName gets the name of the webhook configuration that owns this webhook. + GetConfigurationName() string + + // GetRESTClient gets the webhook client + GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) + // GetParsedNamespaceSelector gets the webhook NamespaceSelector field. + GetParsedNamespaceSelector() (labels.Selector, error) + // GetParsedObjectSelector gets the webhook ObjectSelector field. + GetParsedObjectSelector() (labels.Selector, error) + + // GetName gets the webhook Name field. Note that the name is scoped to the webhook + // configuration and does not provide a globally unique identity, if a unique identity is + // needed, use GetUID. + GetName() string + // GetClientConfig gets the webhook ClientConfig field. + GetClientConfig() v1.WebhookClientConfig + // GetRules gets the webhook Rules field. 
+ GetRules() []v1.RuleWithOperations + // GetFailurePolicy gets the webhook FailurePolicy field. + GetFailurePolicy() *v1.FailurePolicyType + // GetMatchPolicy gets the webhook MatchPolicy field. + GetMatchPolicy() *v1.MatchPolicyType + // GetNamespaceSelector gets the webhook NamespaceSelector field. + GetNamespaceSelector() *metav1.LabelSelector + // GetObjectSelector gets the webhook ObjectSelector field. + GetObjectSelector() *metav1.LabelSelector + // GetSideEffects gets the webhook SideEffects field. + GetSideEffects() *v1.SideEffectClass + // GetTimeoutSeconds gets the webhook TimeoutSeconds field. + GetTimeoutSeconds() *int32 + // GetAdmissionReviewVersions gets the webhook AdmissionReviewVersions field. + GetAdmissionReviewVersions() []string + + // GetMutatingWebhook if the accessor contains a MutatingWebhook, returns it and true, else returns false. + GetMutatingWebhook() (*v1.MutatingWebhook, bool) + // GetValidatingWebhook if the accessor contains a ValidatingWebhook, returns it and true, else returns false. + GetValidatingWebhook() (*v1.ValidatingWebhook, bool) +} + +// NewMutatingWebhookAccessor creates an accessor for a MutatingWebhook. +func NewMutatingWebhookAccessor(uid, configurationName string, h *v1.MutatingWebhook) WebhookAccessor { + return &mutatingWebhookAccessor{uid: uid, configurationName: configurationName, MutatingWebhook: h} +} + +type mutatingWebhookAccessor struct { + *v1.MutatingWebhook + uid string + configurationName string + + initObjectSelector sync.Once + objectSelector labels.Selector + objectSelectorErr error + + initNamespaceSelector sync.Once + namespaceSelector labels.Selector + namespaceSelectorErr error + + initClient sync.Once + client *rest.RESTClient + clientErr error +} + +func (m *mutatingWebhookAccessor) GetUID() string { + return m.uid +} + +func (m *mutatingWebhookAccessor) GetConfigurationName() string { + return m.configurationName +} + +func (m *mutatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) { + m.initClient.Do(func() { + m.client, m.clientErr = clientManager.HookClient(hookClientConfigForWebhook(m)) + }) + return m.client, m.clientErr +} + +func (m *mutatingWebhookAccessor) GetParsedNamespaceSelector() (labels.Selector, error) { + m.initNamespaceSelector.Do(func() { + m.namespaceSelector, m.namespaceSelectorErr = metav1.LabelSelectorAsSelector(m.NamespaceSelector) + }) + return m.namespaceSelector, m.namespaceSelectorErr +} + +func (m *mutatingWebhookAccessor) GetParsedObjectSelector() (labels.Selector, error) { + m.initObjectSelector.Do(func() { + m.objectSelector, m.objectSelectorErr = metav1.LabelSelectorAsSelector(m.ObjectSelector) + }) + return m.objectSelector, m.objectSelectorErr +} + +func (m *mutatingWebhookAccessor) GetName() string { + return m.Name +} + +func (m *mutatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { + return m.ClientConfig +} + +func (m *mutatingWebhookAccessor) GetRules() []v1.RuleWithOperations { + return m.Rules +} + +func (m *mutatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { + return m.FailurePolicy +} + +func (m *mutatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { + return m.MatchPolicy +} + +func (m *mutatingWebhookAccessor) GetNamespaceSelector() *metav1.LabelSelector { + return m.NamespaceSelector +} + +func (m *mutatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { + return m.ObjectSelector +} + +func (m *mutatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { + return 
m.SideEffects +} + +func (m *mutatingWebhookAccessor) GetTimeoutSeconds() *int32 { + return m.TimeoutSeconds +} + +func (m *mutatingWebhookAccessor) GetAdmissionReviewVersions() []string { + return m.AdmissionReviewVersions +} + +func (m *mutatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { + return m.MutatingWebhook, true +} + +func (m *mutatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { + return nil, false +} + +// NewValidatingWebhookAccessor creates an accessor for a ValidatingWebhook. +func NewValidatingWebhookAccessor(uid, configurationName string, h *v1.ValidatingWebhook) WebhookAccessor { + return &validatingWebhookAccessor{uid: uid, configurationName: configurationName, ValidatingWebhook: h} +} + +type validatingWebhookAccessor struct { + *v1.ValidatingWebhook + uid string + configurationName string + + initObjectSelector sync.Once + objectSelector labels.Selector + objectSelectorErr error + + initNamespaceSelector sync.Once + namespaceSelector labels.Selector + namespaceSelectorErr error + + initClient sync.Once + client *rest.RESTClient + clientErr error +} + +func (v *validatingWebhookAccessor) GetUID() string { + return v.uid +} + +func (v *validatingWebhookAccessor) GetConfigurationName() string { + return v.configurationName +} + +func (v *validatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) { + v.initClient.Do(func() { + v.client, v.clientErr = clientManager.HookClient(hookClientConfigForWebhook(v)) + }) + return v.client, v.clientErr +} + +func (v *validatingWebhookAccessor) GetParsedNamespaceSelector() (labels.Selector, error) { + v.initNamespaceSelector.Do(func() { + v.namespaceSelector, v.namespaceSelectorErr = metav1.LabelSelectorAsSelector(v.NamespaceSelector) + }) + return v.namespaceSelector, v.namespaceSelectorErr +} + +func (v *validatingWebhookAccessor) GetParsedObjectSelector() (labels.Selector, error) { + v.initObjectSelector.Do(func() { + v.objectSelector, v.objectSelectorErr = metav1.LabelSelectorAsSelector(v.ObjectSelector) + }) + return v.objectSelector, v.objectSelectorErr +} + +func (v *validatingWebhookAccessor) GetName() string { + return v.Name +} + +func (v *validatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { + return v.ClientConfig +} + +func (v *validatingWebhookAccessor) GetRules() []v1.RuleWithOperations { + return v.Rules +} + +func (v *validatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { + return v.FailurePolicy +} + +func (v *validatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { + return v.MatchPolicy +} + +func (v *validatingWebhookAccessor) GetNamespaceSelector() *metav1.LabelSelector { + return v.NamespaceSelector +} + +func (v *validatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { + return v.ObjectSelector +} + +func (v *validatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { + return v.SideEffects +} + +func (v *validatingWebhookAccessor) GetTimeoutSeconds() *int32 { + return v.TimeoutSeconds +} + +func (v *validatingWebhookAccessor) GetAdmissionReviewVersions() []string { + return v.AdmissionReviewVersions +} + +func (v *validatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { + return nil, false +} + +func (v *validatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { + return v.ValidatingWebhook, true +} + +// hookClientConfigForWebhook construct a webhookutil.ClientConfig using a WebhookAccessor to access +// 
v1beta1.MutatingWebhook and v1beta1.ValidatingWebhook API objects. webhookutil.ClientConfig is used +// to create a HookClient and the purpose of the config struct is to share that with other packages +// that need to create a HookClient. +func hookClientConfigForWebhook(w WebhookAccessor) webhookutil.ClientConfig { + ret := webhookutil.ClientConfig{Name: w.GetName(), CABundle: w.GetClientConfig().CABundle} + if w.GetClientConfig().URL != nil { + ret.URL = *w.GetClientConfig().URL + } + if w.GetClientConfig().Service != nil { + ret.Service = &webhookutil.ClientConfigService{ + Name: w.GetClientConfig().Service.Name, + Namespace: w.GetClientConfig().Service.Namespace, + } + if w.GetClientConfig().Service.Port != nil { + ret.Service.Port = *w.GetClientConfig().Service.Port + } else { + ret.Service.Port = 443 + } + if w.GetClientConfig().Service.Path != nil { + ret.Service.Path = *w.GetClientConfig().Service.Path + } + } + return ret +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go new file mode 100644 index 0000000000..63ab310393 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package webhookadmission diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go new file mode 100644 index 0000000000..2f49b89761 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhookadmission + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &WebhookAdmission{}, + ) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go new file mode 100644 index 0000000000..71ce47b1f4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhookadmission + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. +type WebhookAdmission struct { + metav1.TypeMeta + + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go new file mode 100644 index 0000000000..92cfed1074 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go new file mode 100644 index 0000000000..4a9c0a689b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go new file mode 100644 index 0000000000..632427d7df --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. 
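+//
+// For illustration only (not part of the upstream source): a configuration
+// file that LoadConfig in the parent config package decodes into this type
+// could look like the following, where the kubeconfig path is a hypothetical
+// example value:
+//
+//	apiVersion: apiserver.config.k8s.io/v1
+//	kind: WebhookAdmissionConfiguration
+//	kubeConfigFile: /etc/kubernetes/admission-webhook.kubeconfig
+//
+// LoadConfig rejects kubeConfigFile values that are not absolute paths.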
+type WebhookAdmission struct { + metav1.TypeMeta `json:",inline"` + + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string `json:"kubeConfigFile"` +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..65eb414fcc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go @@ -0,0 +1,67 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*WebhookAdmission)(nil), (*webhookadmission.WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(a.(*WebhookAdmission), b.(*webhookadmission.WebhookAdmission), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*webhookadmission.WebhookAdmission)(nil), (*WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(a.(*webhookadmission.WebhookAdmission), b.(*WebhookAdmission), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. +func Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + return autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) +} + +func autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission is an autogenerated conversion function. 
+func Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + return autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..99fc6a6fa7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go @@ -0,0 +1,50 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. +func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..cce2e603a6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go new file mode 100644 index 0000000000..703f467f9f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go new file mode 100644 index 0000000000..56489f7804 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. 
+ localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go new file mode 100644 index 0000000000..a49a6a813e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. +type WebhookAdmission struct { + metav1.TypeMeta `json:",inline"` + + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string `json:"kubeConfigFile"` +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..eadb147c40 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,67 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*WebhookAdmission)(nil), (*webhookadmission.WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(a.(*WebhookAdmission), b.(*webhookadmission.WebhookAdmission), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*webhookadmission.WebhookAdmission)(nil), (*WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(a.(*webhookadmission.WebhookAdmission), b.(*WebhookAdmission), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. +func Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) +} + +func autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission is an autogenerated conversion function. +func Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + return autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a59d62d6c4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,50 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. 
+func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go new file mode 100644 index 0000000000..dd621a3acd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go new file mode 100644 index 0000000000..90b7e0ae62 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go @@ -0,0 +1,50 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package webhookadmission + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. 
+func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go new file mode 100644 index 0000000000..78f5312a47 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go @@ -0,0 +1,71 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "io" + "io/ioutil" + "path" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" +) + +var ( + scheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(scheme) +) + +func init() { + utilruntime.Must(webhookadmission.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) +} + +// LoadConfig extract the KubeConfigFile from configFile +func LoadConfig(configFile io.Reader) (string, error) { + var kubeconfigFile string + if configFile != nil { + // we have a config so parse it. + data, err := ioutil.ReadAll(configFile) + if err != nil { + return "", err + } + decoder := codecs.UniversalDecoder() + decodedObj, err := runtime.Decode(decoder, data) + if err != nil { + return "", err + } + config, ok := decodedObj.(*webhookadmission.WebhookAdmission) + if !ok { + return "", fmt.Errorf("unexpected type: %T", decodedObj) + } + + if !path.IsAbs(config.KubeConfigFile) { + return "", field.Invalid(field.NewPath("kubeConfigFile"), config.KubeConfigFile, "must be an absolute file path") + } + + kubeconfigFile = config.KubeConfigFile + } + return kubeconfigFile, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go new file mode 100644 index 0000000000..6e86a1b5f2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors contains utilities for admission webhook specific errors +package errors // import "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go new file mode 100644 index 0000000000..00bbf54d2c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "fmt" + "net/http" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ToStatusErr returns a StatusError with information about the webhook plugin +func ToStatusErr(webhookName string, result *metav1.Status) *apierrors.StatusError { + deniedBy := fmt.Sprintf("admission webhook %q denied the request", webhookName) + const noExp = "without explanation" + + if result == nil { + result = &metav1.Status{Status: metav1.StatusFailure} + } + + // Make sure we don't return < 400 status codes along with a rejection + if result.Code < http.StatusBadRequest { + result.Code = http.StatusBadRequest + } + // Make sure we don't return "" or "Success" status along with a rejection + if result.Status == "" || result.Status == metav1.StatusSuccess { + result.Status = metav1.StatusFailure + } + + switch { + case len(result.Message) > 0: + result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Message) + case len(result.Reason) > 0: + result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Reason) + default: + result.Message = fmt.Sprintf("%s %s", deniedBy, noExp) + } + + return &apierrors.StatusError{ + ErrStatus: *result, + } +} + +// NewDryRunUnsupportedErr returns a StatusError with information about the webhook plugin +func NewDryRunUnsupportedErr(webhookName string) *apierrors.StatusError { + reason := fmt.Sprintf("admission webhook %q does not support dry run", webhookName) + return apierrors.NewBadRequest(reason) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go new file mode 100644 index 0000000000..f0e0ed79c1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go @@ -0,0 +1,112 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +// ConvertToGVK converts object to the desired gvk. +func ConvertToGVK(obj runtime.Object, gvk schema.GroupVersionKind, o admission.ObjectInterfaces) (runtime.Object, error) { + // Unlike other resources, custom resources do not have internal version, so + // if obj is a custom resource, it should not need conversion. + if obj.GetObjectKind().GroupVersionKind() == gvk { + return obj, nil + } + out, err := o.GetObjectCreater().New(gvk) + if err != nil { + return nil, err + } + err = o.GetObjectConvertor().Convert(obj, out, nil) + if err != nil { + return nil, err + } + // Explicitly set the GVK + out.GetObjectKind().SetGroupVersionKind(gvk) + return out, nil +} + +// NewVersionedAttributes returns versioned attributes with the old and new object (if non-nil) converted to the requested kind +func NewVersionedAttributes(attr admission.Attributes, gvk schema.GroupVersionKind, o admission.ObjectInterfaces) (*VersionedAttributes, error) { + // convert the old and new objects to the requested version + versionedAttr := &VersionedAttributes{ + Attributes: attr, + VersionedKind: gvk, + } + if oldObj := attr.GetOldObject(); oldObj != nil { + out, err := ConvertToGVK(oldObj, gvk, o) + if err != nil { + return nil, err + } + versionedAttr.VersionedOldObject = out + } + if obj := attr.GetObject(); obj != nil { + out, err := ConvertToGVK(obj, gvk, o) + if err != nil { + return nil, err + } + versionedAttr.VersionedObject = out + } + return versionedAttr, nil +} + +// ConvertVersionedAttributes converts VersionedObject and VersionedOldObject to the specified kind, if needed. +// If attr.VersionedKind already matches the requested kind, no conversion is performed. 
+// If conversion is required: +// * attr.VersionedObject is used as the source for the new object if Dirty=true (and is round-tripped through attr.Attributes.Object, clearing Dirty in the process) +// * attr.Attributes.Object is used as the source for the new object if Dirty=false +// * attr.Attributes.OldObject is used as the source for the old object +func ConvertVersionedAttributes(attr *VersionedAttributes, gvk schema.GroupVersionKind, o admission.ObjectInterfaces) error { + // we already have the desired kind, we're done + if attr.VersionedKind == gvk { + return nil + } + + // convert the original old object to the desired GVK + if oldObj := attr.Attributes.GetOldObject(); oldObj != nil { + out, err := ConvertToGVK(oldObj, gvk, o) + if err != nil { + return err + } + attr.VersionedOldObject = out + } + + if attr.VersionedObject != nil { + // convert the existing versioned object to internal + if attr.Dirty { + err := o.GetObjectConvertor().Convert(attr.VersionedObject, attr.Attributes.GetObject(), nil) + if err != nil { + return err + } + } + + // and back to external + out, err := ConvertToGVK(attr.Attributes.GetObject(), gvk, o) + if err != nil { + return err + } + attr.VersionedObject = out + } + + // Remember we converted to this version + attr.VersionedKind = gvk + attr.Dirty = false + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go new file mode 100644 index 0000000000..4381691ef8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/webhook" +) + +// Source can list dynamic webhook plugins. +type Source interface { + Webhooks() []webhook.WebhookAccessor + HasSynced() bool +} + +// VersionedAttributes is a wrapper around the original admission attributes, adding versioned +// variants of the object and old object. +type VersionedAttributes struct { + // Attributes holds the original admission attributes + admission.Attributes + // VersionedOldObject holds Attributes.OldObject (if non-nil), converted to VersionedKind. + // It must never be mutated. + VersionedOldObject runtime.Object + // VersionedObject holds Attributes.Object (if non-nil), converted to VersionedKind. + // If mutated, Dirty must be set to true by the mutator. 
+ VersionedObject runtime.Object + // VersionedKind holds the fully qualified kind + VersionedKind schema.GroupVersionKind + // Dirty indicates VersionedObject has been modified since being converted from Attributes.Object + Dirty bool +} + +// GetObject overrides the Attributes.GetObject() +func (v *VersionedAttributes) GetObject() runtime.Object { + if v.VersionedObject != nil { + return v.VersionedObject + } + return v.Attributes.GetObject() +} + +// WebhookInvocation describes how to call a webhook, including the resource and subresource the webhook registered for, +// and the kind that should be sent to the webhook. +type WebhookInvocation struct { + Webhook webhook.WebhookAccessor + Resource schema.GroupVersionResource + Subresource string + Kind schema.GroupVersionKind +} + +// Dispatcher dispatches webhook call to a list of webhooks with admission attributes as argument. +type Dispatcher interface { + // Dispatch a request to the webhooks. Dispatcher may choose not to + // call a hook, either because the rules of the hook does not match, or + // the namespaceSelector or the objectSelector of the hook does not + // match. A non-nil error means the request is rejected. + Dispatch(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, hooks []webhook.WebhookAccessor) error +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go new file mode 100644 index 0000000000..c04225e94f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go @@ -0,0 +1,230 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "context" + "fmt" + "io" + + admissionv1 "k8s.io/api/admission/v1" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config" + "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace" + "k8s.io/apiserver/pkg/admission/plugin/webhook/object" + "k8s.io/apiserver/pkg/admission/plugin/webhook/rules" + webhookutil "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" +) + +// Webhook is an abstract admission plugin with all the infrastructure to define Admit or Validate on-top. 
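+// Concrete plugins, such as the mutating admission plugin vendored later in
+// this patch, embed *Webhook and supply a source factory and dispatcher
+// factory to NewWebhook.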
+type Webhook struct { + *admission.Handler + + sourceFactory sourceFactory + + hookSource Source + clientManager *webhookutil.ClientManager + namespaceMatcher *namespace.Matcher + objectMatcher *object.Matcher + dispatcher Dispatcher +} + +var ( + _ genericadmissioninit.WantsExternalKubeClientSet = &Webhook{} + _ admission.Interface = &Webhook{} +) + +type sourceFactory func(f informers.SharedInformerFactory) Source +type dispatcherFactory func(cm *webhookutil.ClientManager) Dispatcher + +// NewWebhook creates a new generic admission webhook. +func NewWebhook(handler *admission.Handler, configFile io.Reader, sourceFactory sourceFactory, dispatcherFactory dispatcherFactory) (*Webhook, error) { + kubeconfigFile, err := config.LoadConfig(configFile) + if err != nil { + return nil, err + } + + cm, err := webhookutil.NewClientManager( + []schema.GroupVersion{ + admissionv1beta1.SchemeGroupVersion, + admissionv1.SchemeGroupVersion, + }, + admissionv1beta1.AddToScheme, + admissionv1.AddToScheme, + ) + if err != nil { + return nil, err + } + authInfoResolver, err := webhookutil.NewDefaultAuthenticationInfoResolver(kubeconfigFile) + if err != nil { + return nil, err + } + // Set defaults which may be overridden later. + cm.SetAuthenticationInfoResolver(authInfoResolver) + cm.SetServiceResolver(webhookutil.NewDefaultServiceResolver()) + + return &Webhook{ + Handler: handler, + sourceFactory: sourceFactory, + clientManager: &cm, + namespaceMatcher: &namespace.Matcher{}, + objectMatcher: &object.Matcher{}, + dispatcher: dispatcherFactory(&cm), + }, nil +} + +// SetAuthenticationInfoResolverWrapper sets the +// AuthenticationInfoResolverWrapper. +// TODO find a better way wire this, but keep this pull small for now. +func (a *Webhook) SetAuthenticationInfoResolverWrapper(wrapper webhookutil.AuthenticationInfoResolverWrapper) { + a.clientManager.SetAuthenticationInfoResolverWrapper(wrapper) +} + +// SetServiceResolver sets a service resolver for the webhook admission plugin. +// Passing a nil resolver does not have an effect, instead a default one will be used. +func (a *Webhook) SetServiceResolver(sr webhookutil.ServiceResolver) { + a.clientManager.SetServiceResolver(sr) +} + +// SetExternalKubeClientSet implements the WantsExternalKubeInformerFactory interface. +// It sets external ClientSet for admission plugins that need it +func (a *Webhook) SetExternalKubeClientSet(client clientset.Interface) { + a.namespaceMatcher.Client = client +} + +// SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. +func (a *Webhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + namespaceInformer := f.Core().V1().Namespaces() + a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() + a.hookSource = a.sourceFactory(f) + a.SetReadyFunc(func() bool { + return namespaceInformer.Informer().HasSynced() && a.hookSource.HasSynced() + }) +} + +// ValidateInitialization implements the InitializationValidator interface. 
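+// It verifies that the hook source, namespace matcher, and client manager
+// have all been wired up before the plugin starts serving admission requests.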
+func (a *Webhook) ValidateInitialization() error { + if a.hookSource == nil { + return fmt.Errorf("kubernetes client is not properly setup") + } + if err := a.namespaceMatcher.Validate(); err != nil { + return fmt.Errorf("namespaceMatcher is not properly setup: %v", err) + } + if err := a.clientManager.Validate(); err != nil { + return fmt.Errorf("clientManager is not properly setup: %v", err) + } + return nil +} + +// ShouldCallHook returns invocation details if the webhook should be called, nil if the webhook should not be called, +// or an error if an error was encountered during evaluation. +func (a *Webhook) ShouldCallHook(h webhook.WebhookAccessor, attr admission.Attributes, o admission.ObjectInterfaces) (*WebhookInvocation, *apierrors.StatusError) { + matches, matchNsErr := a.namespaceMatcher.MatchNamespaceSelector(h, attr) + // Should not return an error here for webhooks which do not apply to the request, even if err is an unexpected scenario. + if !matches && matchNsErr == nil { + return nil, nil + } + + // Should not return an error here for webhooks which do not apply to the request, even if err is an unexpected scenario. + matches, matchObjErr := a.objectMatcher.MatchObjectSelector(h, attr) + if !matches && matchObjErr == nil { + return nil, nil + } + + var invocation *WebhookInvocation + for _, r := range h.GetRules() { + m := rules.Matcher{Rule: r, Attr: attr} + if m.Matches() { + invocation = &WebhookInvocation{ + Webhook: h, + Resource: attr.GetResource(), + Subresource: attr.GetSubresource(), + Kind: attr.GetKind(), + } + break + } + } + if invocation == nil && h.GetMatchPolicy() != nil && *h.GetMatchPolicy() == v1.Equivalent { + attrWithOverride := &attrWithResourceOverride{Attributes: attr} + equivalents := o.GetEquivalentResourceMapper().EquivalentResourcesFor(attr.GetResource(), attr.GetSubresource()) + // honor earlier rules first + OuterLoop: + for _, r := range h.GetRules() { + // see if the rule matches any of the equivalent resources + for _, equivalent := range equivalents { + if equivalent == attr.GetResource() { + // exclude attr.GetResource(), which we already checked + continue + } + attrWithOverride.resource = equivalent + m := rules.Matcher{Rule: r, Attr: attrWithOverride} + if m.Matches() { + kind := o.GetEquivalentResourceMapper().KindFor(equivalent, attr.GetSubresource()) + if kind.Empty() { + return nil, apierrors.NewInternalError(fmt.Errorf("unable to convert to %v: unknown kind", equivalent)) + } + invocation = &WebhookInvocation{ + Webhook: h, + Resource: equivalent, + Subresource: attr.GetSubresource(), + Kind: kind, + } + break OuterLoop + } + } + } + } + + if invocation == nil { + return nil, nil + } + if matchNsErr != nil { + return nil, matchNsErr + } + if matchObjErr != nil { + return nil, matchObjErr + } + + return invocation, nil +} + +type attrWithResourceOverride struct { + admission.Attributes + resource schema.GroupVersionResource +} + +func (a *attrWithResourceOverride) GetResource() schema.GroupVersionResource { return a.resource } + +// Dispatch is called by the downstream Validate or Admit methods. 
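+// It ignores requests for webhook configuration resources themselves, rejects
+// requests that arrive before the informers have synced, and otherwise hands
+// the attributes and the current list of hooks to the configured dispatcher.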
+func (a *Webhook) Dispatch(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + if rules.IsWebhookConfigurationResource(attr) { + return nil + } + if !a.WaitForReady() { + return admission.NewForbidden(attr, fmt.Errorf("not yet ready to handle request")) + } + hooks := a.hookSource.Webhooks() + return a.dispatcher.Dispatch(ctx, attr, o, hooks) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go new file mode 100644 index 0000000000..4cf9f37117 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -0,0 +1,430 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mutating delegates admission checks to dynamically configured +// mutating webhooks. +package mutating + +import ( + "context" + "fmt" + "time" + + jsonpatch "github.com/evanphx/json-patch" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/klog/v2" + + admissionv1 "k8s.io/api/admission/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + webhookrequest "k8s.io/apiserver/pkg/admission/plugin/webhook/request" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + webhookutil "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/apiserver/pkg/warning" + utiltrace "k8s.io/utils/trace" +) + +const ( + // PatchAuditAnnotationPrefix is a prefix for persisting webhook patch in audit annotation. + // Audit handler decides whether annotation with this prefix should be logged based on audit level. + // Since mutating webhook patches the request body, audit level must be greater or equal to Request + // for the annotation to be logged + PatchAuditAnnotationPrefix = "patch.webhook.admission.k8s.io/" + // MutationAuditAnnotationPrefix is a prefix for presisting webhook mutation existence in audit annotation. 
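+	// Keys built from these prefixes take the form
+	// <prefix>round_<round>_index_<index> (see newWebhookAnnotator below), and
+	// the values are JSON-encoded MutationAuditAnnotation and
+	// PatchAuditAnnotation structs.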
+ MutationAuditAnnotationPrefix = "mutation.webhook.admission.k8s.io/" +) + +var encodingjson = json.CaseSensitiveJSONIterator() + +type mutatingDispatcher struct { + cm *webhookutil.ClientManager + plugin *Plugin +} + +func newMutatingDispatcher(p *Plugin) func(cm *webhookutil.ClientManager) generic.Dispatcher { + return func(cm *webhookutil.ClientManager) generic.Dispatcher { + return &mutatingDispatcher{cm, p} + } +} + +var _ generic.Dispatcher = &mutatingDispatcher{} + +func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces, hooks []webhook.WebhookAccessor) error { + reinvokeCtx := attr.GetReinvocationContext() + var webhookReinvokeCtx *webhookReinvokeContext + if v := reinvokeCtx.Value(PluginName); v != nil { + webhookReinvokeCtx = v.(*webhookReinvokeContext) + } else { + webhookReinvokeCtx = &webhookReinvokeContext{} + reinvokeCtx.SetValue(PluginName, webhookReinvokeCtx) + } + + if reinvokeCtx.IsReinvoke() && webhookReinvokeCtx.IsOutputChangedSinceLastWebhookInvocation(attr.GetObject()) { + // If the object has changed, we know the in-tree plugin re-invocations have mutated the object, + // and we need to reinvoke all eligible webhooks. + webhookReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() + } + defer func() { + webhookReinvokeCtx.SetLastWebhookInvocationOutput(attr.GetObject()) + }() + var versionedAttr *generic.VersionedAttributes + for i, hook := range hooks { + attrForCheck := attr + if versionedAttr != nil { + attrForCheck = versionedAttr + } + invocation, statusErr := a.plugin.ShouldCallHook(hook, attrForCheck, o) + if statusErr != nil { + return statusErr + } + if invocation == nil { + continue + } + hook, ok := invocation.Webhook.GetMutatingWebhook() + if !ok { + return fmt.Errorf("mutating webhook dispatch requires v1.MutatingWebhook, but got %T", hook) + } + // This means that during reinvocation, a webhook will not be + // called for the first time. For example, if the webhook is + // skipped in the first round because of mismatching labels, + // even if the labels become matching, the webhook does not + // get called during reinvocation. 
+ if reinvokeCtx.IsReinvoke() && !webhookReinvokeCtx.ShouldReinvokeWebhook(invocation.Webhook.GetUID()) { + continue + } + + if versionedAttr == nil { + // First webhook, create versioned attributes + var err error + if versionedAttr, err = generic.NewVersionedAttributes(attr, invocation.Kind, o); err != nil { + return apierrors.NewInternalError(err) + } + } else { + // Subsequent webhook, convert existing versioned attributes to this webhook's version + if err := generic.ConvertVersionedAttributes(versionedAttr, invocation.Kind, o); err != nil { + return apierrors.NewInternalError(err) + } + } + + t := time.Now() + round := 0 + if reinvokeCtx.IsReinvoke() { + round = 1 + } + changed, err := a.callAttrMutatingHook(ctx, hook, invocation, versionedAttr, o, round, i) + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == admissionregistrationv1.Ignore + rejected := false + if err != nil { + switch err := err.(type) { + case *webhookutil.ErrCallingWebhook: + if !ignoreClientCallFailures { + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, 0) + } + case *webhookutil.ErrWebhookRejection: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionNoError, int(err.Status.ErrStatus.Code)) + default: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionAPIServerInternalError, 0) + } + } + admissionmetrics.Metrics.ObserveWebhook(time.Since(t), rejected, versionedAttr.Attributes, "admit", hook.Name) + if changed { + // Patch had changed the object. Prepare to reinvoke all previous webhooks that are eligible for re-invocation. 
+ webhookReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() + reinvokeCtx.SetShouldReinvoke() + } + if hook.ReinvocationPolicy != nil && *hook.ReinvocationPolicy == admissionregistrationv1.IfNeededReinvocationPolicy { + webhookReinvokeCtx.AddReinvocableWebhookToPreviouslyInvoked(invocation.Webhook.GetUID()) + } + if err == nil { + continue + } + + if callErr, ok := err.(*webhookutil.ErrCallingWebhook); ok { + if ignoreClientCallFailures { + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + utilruntime.HandleError(callErr) + + select { + case <-ctx.Done(): + // parent context is canceled or timed out, no point in continuing + return apierrors.NewTimeoutError("request did not complete within requested timeout", 0) + default: + // individual webhook timed out, but parent context did not, continue + continue + } + } + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + return apierrors.NewInternalError(err) + } + if rejectionErr, ok := err.(*webhookutil.ErrWebhookRejection); ok { + return rejectionErr.Status + } + return err + } + + // convert versionedAttr.VersionedObject to the internal version in the underlying admission.Attributes + if versionedAttr != nil && versionedAttr.VersionedObject != nil && versionedAttr.Dirty { + return o.GetObjectConvertor().Convert(versionedAttr.VersionedObject, versionedAttr.Attributes.GetObject(), nil) + } + + return nil +} + +// note that callAttrMutatingHook updates attr + +func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admissionregistrationv1.MutatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes, o admission.ObjectInterfaces, round, idx int) (bool, error) { + configurationName := invocation.Webhook.GetConfigurationName() + annotator := newWebhookAnnotator(attr, round, idx, h.Name, configurationName) + changed := false + defer func() { annotator.addMutationAnnotation(changed) }() + if attr.Attributes.IsDryRun() { + if h.SideEffects == nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} + } + if !(*h.SideEffects == admissionregistrationv1.SideEffectClassNone || *h.SideEffects == admissionregistrationv1.SideEffectClassNoneOnDryRun) { + return false, webhookerrors.NewDryRunUnsupportedErr(h.Name) + } + } + + uid, request, response, err := webhookrequest.CreateAdmissionObjects(attr, invocation) + if err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + // Make the webhook request + client, err := invocation.Webhook.GetRESTClient(a.cm) + if err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace := utiltrace.New("Call mutating webhook", + utiltrace.Field{"configuration", configurationName}, + utiltrace.Field{"webhook", h.Name}, + utiltrace.Field{"resource", attr.GetResource()}, + utiltrace.Field{"subresource", attr.GetSubresource()}, + utiltrace.Field{"operation", attr.GetOperation()}, + utiltrace.Field{"UID", uid}) + defer trace.LogIfLong(500 * time.Millisecond) + + // if the webhook has a specific timeout, wrap the context to apply it + if h.TimeoutSeconds != nil { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(*h.TimeoutSeconds)*time.Second) + defer cancel() + } + + r := client.Post().Body(request) + + // if the context has a deadline, set it as a parameter to inform the backend + if deadline, hasDeadline := ctx.Deadline(); 
hasDeadline { + // compute the timeout + if timeout := time.Until(deadline); timeout > 0 { + // if it's not an even number of seconds, round up to the nearest second + if truncated := timeout.Truncate(time.Second); truncated != timeout { + timeout = truncated + time.Second + } + // set the timeout + r.Timeout(timeout) + } + } + + if err := r.Do(ctx).Into(response); err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace.Step("Request completed") + + result, err := webhookrequest.VerifyAdmissionResponse(uid, true, response) + if err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + + for k, v := range result.AuditAnnotations { + key := h.Name + "/" + k + if err := attr.Attributes.AddAnnotation(key, v); err != nil { + klog.Warningf("Failed to set admission audit annotation %s to %s for mutating webhook %s: %v", key, v, h.Name, err) + } + } + for _, w := range result.Warnings { + warning.AddWarning(ctx, "", w) + } + + if !result.Allowed { + return false, &webhookutil.ErrWebhookRejection{Status: webhookerrors.ToStatusErr(h.Name, result.Result)} + } + + if len(result.Patch) == 0 { + return false, nil + } + patchObj, err := jsonpatch.DecodePatch(result.Patch) + if err != nil { + return false, apierrors.NewInternalError(err) + } + + if len(patchObj) == 0 { + return false, nil + } + + // if a non-empty patch was provided, and we have no object we can apply it to (e.g. a DELETE admission operation), error + if attr.VersionedObject == nil { + return false, apierrors.NewInternalError(fmt.Errorf("admission webhook %q attempted to modify the object, which is not supported for this operation", h.Name)) + } + + var patchedJS []byte + jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, o.GetObjectCreater(), o.GetObjectTyper(), false) + switch result.PatchType { + // VerifyAdmissionResponse normalizes to v1 patch types, regardless of the AdmissionReview version used + case admissionv1.PatchTypeJSONPatch: + objJS, err := runtime.Encode(jsonSerializer, attr.VersionedObject) + if err != nil { + return false, apierrors.NewInternalError(err) + } + patchedJS, err = patchObj.Apply(objJS) + if err != nil { + return false, apierrors.NewInternalError(err) + } + default: + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("unsupported patch type %q", result.PatchType)} + } + + var newVersionedObject runtime.Object + if _, ok := attr.VersionedObject.(*unstructured.Unstructured); ok { + // Custom Resources don't have corresponding Go struct's. + // They are represented as Unstructured. + newVersionedObject = &unstructured.Unstructured{} + } else { + newVersionedObject, err = o.GetObjectCreater().New(attr.VersionedKind) + if err != nil { + return false, apierrors.NewInternalError(err) + } + } + + // TODO: if we have multiple mutating webhooks, we can remember the json + // instead of encoding and decoding for each one. 
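+	// Decode the patched JSON back into the freshly created versioned object;
+	// any decode failure is surfaced to the caller as an internal error.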
+ if newVersionedObject, _, err = jsonSerializer.Decode(patchedJS, nil, newVersionedObject); err != nil { + return false, apierrors.NewInternalError(err) + } + + changed = !apiequality.Semantic.DeepEqual(attr.VersionedObject, newVersionedObject) + trace.Step("Patch applied") + annotator.addPatchAnnotation(patchObj, result.PatchType) + attr.Dirty = true + attr.VersionedObject = newVersionedObject + o.GetObjectDefaulter().Default(attr.VersionedObject) + return changed, nil +} + +type webhookAnnotator struct { + attr *generic.VersionedAttributes + patchAnnotationKey string + mutationAnnotationKey string + webhook string + configuration string +} + +func newWebhookAnnotator(attr *generic.VersionedAttributes, round, idx int, webhook, configuration string) *webhookAnnotator { + return &webhookAnnotator{ + attr: attr, + patchAnnotationKey: fmt.Sprintf("%sround_%d_index_%d", PatchAuditAnnotationPrefix, round, idx), + mutationAnnotationKey: fmt.Sprintf("%sround_%d_index_%d", MutationAuditAnnotationPrefix, round, idx), + webhook: webhook, + configuration: configuration, + } +} + +func (w *webhookAnnotator) addMutationAnnotation(mutated bool) { + if w.attr == nil || w.attr.Attributes == nil { + return + } + value, err := mutationAnnotationValue(w.configuration, w.webhook, mutated) + if err != nil { + klog.Warningf("unexpected error composing mutating webhook annotation: %v", err) + return + } + if err := w.attr.Attributes.AddAnnotation(w.mutationAnnotationKey, value); err != nil { + klog.Warningf("failed to set mutation annotation for mutating webhook key %s to %s: %v", w.mutationAnnotationKey, value, err) + } +} + +func (w *webhookAnnotator) addPatchAnnotation(patch interface{}, patchType admissionv1.PatchType) { + if w.attr == nil || w.attr.Attributes == nil { + return + } + var value string + var err error + switch patchType { + case admissionv1.PatchTypeJSONPatch: + value, err = jsonPatchAnnotationValue(w.configuration, w.webhook, patch) + if err != nil { + klog.Warningf("unexpected error composing mutating webhook JSON patch annotation: %v", err) + return + } + default: + klog.Warningf("unsupported patch type for mutating webhook annotation: %v", patchType) + return + } + if err := w.attr.Attributes.AddAnnotationWithLevel(w.patchAnnotationKey, value, auditinternal.LevelRequest); err != nil { + // NOTE: we don't log actual patch in kube-apiserver log to avoid potentially + // leaking information + klog.Warningf("failed to set patch annotation for mutating webhook key %s; confugiration name: %s, webhook name: %s", w.patchAnnotationKey, w.configuration, w.webhook) + } +} + +// MutationAuditAnnotation logs if a webhook invocation mutated the request object +type MutationAuditAnnotation struct { + Configuration string `json:"configuration"` + Webhook string `json:"webhook"` + Mutated bool `json:"mutated"` +} + +// PatchAuditAnnotation logs a patch from a mutating webhook +type PatchAuditAnnotation struct { + Configuration string `json:"configuration"` + Webhook string `json:"webhook"` + Patch interface{} `json:"patch,omitempty"` + PatchType string `json:"patchType,omitempty"` +} + +func mutationAnnotationValue(configuration, webhook string, mutated bool) (string, error) { + m := MutationAuditAnnotation{ + Configuration: configuration, + Webhook: webhook, + Mutated: mutated, + } + bytes, err := encodingjson.Marshal(m) + return string(bytes), err +} + +func jsonPatchAnnotationValue(configuration, webhook string, patch interface{}) (string, error) { + p := PatchAuditAnnotation{ + Configuration: 
configuration, + Webhook: webhook, + Patch: patch, + PatchType: string(admissionv1.PatchTypeJSONPatch), + } + bytes, err := encodingjson.Marshal(p) + return string(bytes), err +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go new file mode 100644 index 0000000000..d804aca1cf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mutating makes calls to mutating webhooks during the admission +// process. +package mutating // import "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go new file mode 100644 index 0000000000..fb07a61c1b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + "context" + "io" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/configuration" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" +) + +const ( + // PluginName indicates the name of admission plug-in + PluginName = "MutatingAdmissionWebhook" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { + plugin, err := NewMutatingWebhook(configFile) + if err != nil { + return nil, err + } + + return plugin, nil + }) +} + +// Plugin is an implementation of admission.Interface. +type Plugin struct { + *generic.Webhook +} + +var _ admission.MutationInterface = &Plugin{} + +// NewMutatingWebhook returns a generic admission webhook plugin. +func NewMutatingWebhook(configFile io.Reader) (*Plugin, error) { + handler := admission.NewHandler(admission.Connect, admission.Create, admission.Delete, admission.Update) + p := &Plugin{} + var err error + p.Webhook, err = generic.NewWebhook(handler, configFile, configuration.NewMutatingWebhookConfigurationManager, newMutatingDispatcher(p)) + if err != nil { + return nil, err + } + + return p, nil +} + +// ValidateInitialization implements the InitializationValidator interface. 
+func (a *Plugin) ValidateInitialization() error { + if err := a.Webhook.ValidateInitialization(); err != nil { + return err + } + return nil +} + +// Admit makes an admission decision based on the request attributes. +func (a *Plugin) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.Webhook.Dispatch(ctx, attr, o) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go new file mode 100644 index 0000000000..de0af221e1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +type webhookReinvokeContext struct { + // lastWebhookOutput holds the result of the last webhook admission plugin call + lastWebhookOutput runtime.Object + // previouslyInvokedReinvocableWebhooks holds the set of webhooks that have been invoked and + // should be reinvoked if a later mutation occurs + previouslyInvokedReinvocableWebhooks sets.String + // reinvokeWebhooks holds the set of webhooks that should be reinvoked + reinvokeWebhooks sets.String +} + +func (rc *webhookReinvokeContext) ShouldReinvokeWebhook(webhook string) bool { + return rc.reinvokeWebhooks.Has(webhook) +} + +func (rc *webhookReinvokeContext) IsOutputChangedSinceLastWebhookInvocation(object runtime.Object) bool { + return !apiequality.Semantic.DeepEqual(rc.lastWebhookOutput, object) +} + +func (rc *webhookReinvokeContext) SetLastWebhookInvocationOutput(object runtime.Object) { + if object == nil { + rc.lastWebhookOutput = nil + return + } + rc.lastWebhookOutput = object.DeepCopyObject() +} + +func (rc *webhookReinvokeContext) AddReinvocableWebhookToPreviouslyInvoked(webhook string) { + if rc.previouslyInvokedReinvocableWebhooks == nil { + rc.previouslyInvokedReinvocableWebhooks = sets.NewString() + } + rc.previouslyInvokedReinvocableWebhooks.Insert(webhook) +} + +func (rc *webhookReinvokeContext) RequireReinvokingPreviouslyInvokedPlugins() { + if len(rc.previouslyInvokedReinvocableWebhooks) > 0 { + if rc.reinvokeWebhooks == nil { + rc.reinvokeWebhooks = sets.NewString() + } + for s := range rc.previouslyInvokedReinvocableWebhooks { + rc.reinvokeWebhooks.Insert(s) + } + rc.previouslyInvokedReinvocableWebhooks = sets.NewString() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go new file mode 100644 index 0000000000..d1a2853383 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package namespace defines the utilities that are used by the webhook +// plugin to decide if a webhook should be applied to an object based on its +// namespace. +package namespace // import "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go new file mode 100644 index 0000000000..183be7b39a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go @@ -0,0 +1,121 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namespace + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" +) + +// Matcher decides if a request is exempted by the NamespaceSelector of a +// webhook configuration. +type Matcher struct { + NamespaceLister corelisters.NamespaceLister + Client clientset.Interface +} + +// Validate checks if the Matcher has a NamespaceLister and Client. +func (m *Matcher) Validate() error { + var errs []error + if m.NamespaceLister == nil { + errs = append(errs, fmt.Errorf("the namespace matcher requires a namespaceLister")) + } + if m.Client == nil { + errs = append(errs, fmt.Errorf("the namespace matcher requires a namespaceLister")) + } + return utilerrors.NewAggregate(errs) +} + +// GetNamespaceLabels gets the labels of the namespace related to the attr. +func (m *Matcher) GetNamespaceLabels(attr admission.Attributes) (map[string]string, error) { + // If the request itself is creating or updating a namespace, then get the + // labels from attr.Object, because namespaceLister doesn't have the latest + // namespace yet. + // + // However, if the request is deleting a namespace, then get the label from + // the namespace in the namespaceLister, because a delete request is not + // going to change the object, and attr.Object will be a DeleteOptions + // rather than a namespace object. 
+ if attr.GetResource().Resource == "namespaces" && + len(attr.GetSubresource()) == 0 && + (attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) { + accessor, err := meta.Accessor(attr.GetObject()) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil + } + + namespaceName := attr.GetNamespace() + namespace, err := m.NamespaceLister.Get(namespaceName) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + if apierrors.IsNotFound(err) { + // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not + namespace, err = m.Client.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + } + return namespace.Labels, nil +} + +// MatchNamespaceSelector decideds whether the request matches the +// namespaceSelctor of the webhook. Only when they match, the webhook is called. +func (m *Matcher) MatchNamespaceSelector(h webhook.WebhookAccessor, attr admission.Attributes) (bool, *apierrors.StatusError) { + namespaceName := attr.GetNamespace() + if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" { + // If the request is about a cluster scoped resource, and it is not a + // namespace, it is never exempted. + // TODO: figure out a way selective exempt cluster scoped resources. + // Also update the comment in types.go + return true, nil + } + selector, err := h.GetParsedNamespaceSelector() + if err != nil { + return false, apierrors.NewInternalError(err) + } + if selector.Empty() { + return true, nil + } + + namespaceLabels, err := m.GetNamespaceLabels(attr) + // this means the namespace is not found, for backwards compatibility, + // return a 404 + if apierrors.IsNotFound(err) { + status, ok := err.(apierrors.APIStatus) + if !ok { + return false, apierrors.NewInternalError(err) + } + return false, &apierrors.StatusError{status.Status()} + } + if err != nil { + return false, apierrors.NewInternalError(err) + } + return selector.Matches(labels.Set(namespaceLabels)), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go new file mode 100644 index 0000000000..93c4734409 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package object defines the utilities that are used by the webhook plugin to +// decide if a webhook should run, as long as either the old object or the new +// object has labels matching the webhook config's objectSelector. 
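Editorial aside, not part of the vendored file: both the namespaceSelector matching above and the objectSelector matching in the object package below reduce to parsing a metav1.LabelSelector and testing it against a labels.Set. A minimal standalone sketch of that primitive (the selector and label values are invented for illustration):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A namespaceSelector/objectSelector as it would appear in a webhook configuration.
	sel := &metav1.LabelSelector{
		MatchLabels: map[string]string{"environment": "prod"},
	}
	parsed, err := metav1.LabelSelectorAsSelector(sel) // the parsed form cached by the webhook accessor
	if err != nil {
		panic(err)
	}

	// Labels taken from the namespace (or from the object/old object for objectSelector).
	nsLabels := map[string]string{"environment": "prod", "team": "payments"}
	fmt.Println(parsed.Empty(), parsed.Matches(labels.Set(nsLabels))) // false true
}

An empty selector matches everything, which is why both matchers short-circuit on selector.Empty() before fetching any labels.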
+package object // import "k8s.io/apiserver/pkg/admission/plugin/webhook/object" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go new file mode 100644 index 0000000000..773e3e6ee6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go @@ -0,0 +1,57 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + "k8s.io/klog/v2" +) + +// Matcher decides if a request selected by the ObjectSelector. +type Matcher struct { +} + +func matchObject(obj runtime.Object, selector labels.Selector) bool { + if obj == nil { + return false + } + accessor, err := meta.Accessor(obj) + if err != nil { + klog.V(5).Infof("cannot access metadata of %v: %v", obj, err) + return false + } + return selector.Matches(labels.Set(accessor.GetLabels())) + +} + +// MatchObjectSelector decideds whether the request matches the ObjectSelector +// of the webhook. Only when they match, the webhook is called. +func (m *Matcher) MatchObjectSelector(h webhook.WebhookAccessor, attr admission.Attributes) (bool, *apierrors.StatusError) { + selector, err := h.GetParsedObjectSelector() + if err != nil { + return false, apierrors.NewInternalError(err) + } + if selector.Empty() { + return true, nil + } + return matchObject(attr.GetObject(), selector) || matchObject(attr.GetOldObject(), selector), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go new file mode 100644 index 0000000000..c60d0fb9e7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go @@ -0,0 +1,283 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package request + +import ( + "fmt" + + admissionv1 "k8s.io/api/admission/v1" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" +) + +// AdmissionResponse contains the fields extracted from an AdmissionReview response +type AdmissionResponse struct { + AuditAnnotations map[string]string + Allowed bool + Patch []byte + PatchType admissionv1.PatchType + Result *metav1.Status + Warnings []string +} + +// VerifyAdmissionResponse checks the validity of the provided admission review object, and returns the +// audit annotations, whether the response allowed the request, any provided patch/patchType/status, +// or an error if the provided admission review was not valid. +func VerifyAdmissionResponse(uid types.UID, mutating bool, review runtime.Object) (*AdmissionResponse, error) { + switch r := review.(type) { + case *admissionv1.AdmissionReview: + if r.Response == nil { + return nil, fmt.Errorf("webhook response was absent") + } + + // Verify UID matches + if r.Response.UID != uid { + return nil, fmt.Errorf("expected response.uid=%q, got %q", uid, r.Response.UID) + } + + // Verify GVK + v1GVK := admissionv1.SchemeGroupVersion.WithKind("AdmissionReview") + if r.GroupVersionKind() != v1GVK { + return nil, fmt.Errorf("expected webhook response of %v, got %v", v1GVK.String(), r.GroupVersionKind().String()) + } + + patch := []byte(nil) + patchType := admissionv1.PatchType("") + + if mutating { + // Ensure a mutating webhook provides both patch and patchType together + if len(r.Response.Patch) > 0 && r.Response.PatchType == nil { + return nil, fmt.Errorf("webhook returned response.patch but not response.patchType") + } + if len(r.Response.Patch) == 0 && r.Response.PatchType != nil { + return nil, fmt.Errorf("webhook returned response.patchType but not response.patch") + } + patch = r.Response.Patch + if r.Response.PatchType != nil { + patchType = *r.Response.PatchType + if len(patchType) == 0 { + return nil, fmt.Errorf("webhook returned invalid response.patchType of %q", patchType) + } + } + } else { + // Ensure a validating webhook doesn't return patch or patchType + if len(r.Response.Patch) > 0 { + return nil, fmt.Errorf("validating webhook may not return response.patch") + } + if r.Response.PatchType != nil { + return nil, fmt.Errorf("validating webhook may not return response.patchType") + } + } + + return &AdmissionResponse{ + AuditAnnotations: r.Response.AuditAnnotations, + Allowed: r.Response.Allowed, + Patch: patch, + PatchType: patchType, + Result: r.Response.Result, + Warnings: r.Response.Warnings, + }, nil + + case *admissionv1beta1.AdmissionReview: + if r.Response == nil { + return nil, fmt.Errorf("webhook response was absent") + } + + // Response GVK and response.uid were not verified in v1beta1 handling, allow any + + patch := []byte(nil) + patchType := admissionv1.PatchType("") + if mutating { + patch = r.Response.Patch + if len(r.Response.Patch) > 0 { + // patch type was not verified in v1beta1 admissionreview handling. pin to only supported version if a patch is provided. 
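Editorial aside, not part of the vendored file: a webhook response that satisfies the v1 checks above (response present, request UID echoed, v1 AdmissionReview group/version/kind, and patch paired with patchType) could be constructed as sketched below; the UID and patch contents are illustrative only:

package main

import (
	"fmt"

	admissionv1 "k8s.io/api/admission/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	uid := types.UID("705ab4f5-6393-11e8-b7cc-42010a800002") // must echo request.uid
	pt := admissionv1.PatchTypeJSONPatch

	review := admissionv1.AdmissionReview{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "admission.k8s.io/v1", // GVK must be the v1 AdmissionReview kind
			Kind:       "AdmissionReview",
		},
		Response: &admissionv1.AdmissionResponse{
			UID:       uid,
			Allowed:   true,
			Patch:     []byte(`[{"op":"add","path":"/metadata/labels/injected","value":"true"}]`),
			PatchType: &pt, // patch and patchType must be set together for mutating webhooks
		},
	}
	fmt.Println(review.Response.Allowed, *review.Response.PatchType)
}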
+ patchType = admissionv1.PatchTypeJSONPatch + } + } + + return &AdmissionResponse{ + AuditAnnotations: r.Response.AuditAnnotations, + Allowed: r.Response.Allowed, + Patch: patch, + PatchType: patchType, + Result: r.Response.Result, + Warnings: r.Response.Warnings, + }, nil + + default: + return nil, fmt.Errorf("unexpected response type %T", review) + } +} + +// CreateAdmissionObjects returns the unique request uid, the AdmissionReview object to send the webhook and to decode the response into, +// or an error if the webhook does not support receiving any of the admission review versions we know to send +func CreateAdmissionObjects(versionedAttributes *generic.VersionedAttributes, invocation *generic.WebhookInvocation) (uid types.UID, request, response runtime.Object, err error) { + for _, version := range invocation.Webhook.GetAdmissionReviewVersions() { + switch version { + case admissionv1.SchemeGroupVersion.Version: + uid := types.UID(uuid.NewUUID()) + request := CreateV1AdmissionReview(uid, versionedAttributes, invocation) + response := &admissionv1.AdmissionReview{} + return uid, request, response, nil + + case admissionv1beta1.SchemeGroupVersion.Version: + uid := types.UID(uuid.NewUUID()) + request := CreateV1beta1AdmissionReview(uid, versionedAttributes, invocation) + response := &admissionv1beta1.AdmissionReview{} + return uid, request, response, nil + + } + } + return "", nil, nil, fmt.Errorf("webhook does not accept known AdmissionReview versions (v1, v1beta1)") +} + +// CreateV1AdmissionReview creates an AdmissionReview for the provided admission.Attributes +func CreateV1AdmissionReview(uid types.UID, versionedAttributes *generic.VersionedAttributes, invocation *generic.WebhookInvocation) *admissionv1.AdmissionReview { + attr := versionedAttributes.Attributes + gvk := invocation.Kind + gvr := invocation.Resource + subresource := invocation.Subresource + requestGVK := attr.GetKind() + requestGVR := attr.GetResource() + requestSubResource := attr.GetSubresource() + aUserInfo := attr.GetUserInfo() + userInfo := authenticationv1.UserInfo{ + Extra: make(map[string]authenticationv1.ExtraValue), + Groups: aUserInfo.GetGroups(), + UID: aUserInfo.GetUID(), + Username: aUserInfo.GetName(), + } + dryRun := attr.IsDryRun() + + // Convert the extra information in the user object + for key, val := range aUserInfo.GetExtra() { + userInfo.Extra[key] = authenticationv1.ExtraValue(val) + } + + return &admissionv1.AdmissionReview{ + Request: &admissionv1.AdmissionRequest{ + UID: uid, + Kind: metav1.GroupVersionKind{ + Group: gvk.Group, + Kind: gvk.Kind, + Version: gvk.Version, + }, + Resource: metav1.GroupVersionResource{ + Group: gvr.Group, + Resource: gvr.Resource, + Version: gvr.Version, + }, + SubResource: subresource, + RequestKind: &metav1.GroupVersionKind{ + Group: requestGVK.Group, + Kind: requestGVK.Kind, + Version: requestGVK.Version, + }, + RequestResource: &metav1.GroupVersionResource{ + Group: requestGVR.Group, + Resource: requestGVR.Resource, + Version: requestGVR.Version, + }, + RequestSubResource: requestSubResource, + Name: attr.GetName(), + Namespace: attr.GetNamespace(), + Operation: admissionv1.Operation(attr.GetOperation()), + UserInfo: userInfo, + Object: runtime.RawExtension{ + Object: versionedAttributes.VersionedObject, + }, + OldObject: runtime.RawExtension{ + Object: versionedAttributes.VersionedOldObject, + }, + DryRun: &dryRun, + Options: runtime.RawExtension{ + Object: attr.GetOperationOptions(), + }, + }, + } +} + +// CreateV1beta1AdmissionReview creates an 
AdmissionReview for the provided admission.Attributes +func CreateV1beta1AdmissionReview(uid types.UID, versionedAttributes *generic.VersionedAttributes, invocation *generic.WebhookInvocation) *admissionv1beta1.AdmissionReview { + attr := versionedAttributes.Attributes + gvk := invocation.Kind + gvr := invocation.Resource + subresource := invocation.Subresource + requestGVK := attr.GetKind() + requestGVR := attr.GetResource() + requestSubResource := attr.GetSubresource() + aUserInfo := attr.GetUserInfo() + userInfo := authenticationv1.UserInfo{ + Extra: make(map[string]authenticationv1.ExtraValue), + Groups: aUserInfo.GetGroups(), + UID: aUserInfo.GetUID(), + Username: aUserInfo.GetName(), + } + dryRun := attr.IsDryRun() + + // Convert the extra information in the user object + for key, val := range aUserInfo.GetExtra() { + userInfo.Extra[key] = authenticationv1.ExtraValue(val) + } + + return &admissionv1beta1.AdmissionReview{ + Request: &admissionv1beta1.AdmissionRequest{ + UID: uid, + Kind: metav1.GroupVersionKind{ + Group: gvk.Group, + Kind: gvk.Kind, + Version: gvk.Version, + }, + Resource: metav1.GroupVersionResource{ + Group: gvr.Group, + Resource: gvr.Resource, + Version: gvr.Version, + }, + SubResource: subresource, + RequestKind: &metav1.GroupVersionKind{ + Group: requestGVK.Group, + Kind: requestGVK.Kind, + Version: requestGVK.Version, + }, + RequestResource: &metav1.GroupVersionResource{ + Group: requestGVR.Group, + Resource: requestGVR.Resource, + Version: requestGVR.Version, + }, + RequestSubResource: requestSubResource, + Name: attr.GetName(), + Namespace: attr.GetNamespace(), + Operation: admissionv1beta1.Operation(attr.GetOperation()), + UserInfo: userInfo, + Object: runtime.RawExtension{ + Object: versionedAttributes.VersionedObject, + }, + OldObject: runtime.RawExtension{ + Object: versionedAttributes.VersionedOldObject, + }, + DryRun: &dryRun, + Options: runtime.RawExtension{ + Object: attr.GetOperationOptions(), + }, + }, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go new file mode 100644 index 0000000000..fbacf33717 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package request creates admissionReview request based on admission attributes. +package request // import "k8s.io/apiserver/pkg/admission/plugin/webhook/request" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go new file mode 100644 index 0000000000..924e79bcc9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go @@ -0,0 +1,129 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "strings" + + "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +// Matcher determines if the Attr matches the Rule. +type Matcher struct { + Rule v1.RuleWithOperations + Attr admission.Attributes +} + +// Matches returns if the Attr matches the Rule. +func (r *Matcher) Matches() bool { + return r.scope() && + r.operation() && + r.group() && + r.version() && + r.resource() +} + +func exactOrWildcard(items []string, requested string) bool { + for _, item := range items { + if item == "*" { + return true + } + if item == requested { + return true + } + } + + return false +} + +var namespaceResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} + +func (r *Matcher) scope() bool { + if r.Rule.Scope == nil || *r.Rule.Scope == v1.AllScopes { + return true + } + // attr.GetNamespace() is set to the name of the namespace for requests of the namespace object itself. + switch *r.Rule.Scope { + case v1.NamespacedScope: + // first make sure that we are not requesting a namespace object (namespace objects are cluster-scoped) + return r.Attr.GetResource() != namespaceResource && r.Attr.GetNamespace() != metav1.NamespaceNone + case v1.ClusterScope: + // also return true if the request is for a namespace object (namespace objects are cluster-scoped) + return r.Attr.GetResource() == namespaceResource || r.Attr.GetNamespace() == metav1.NamespaceNone + default: + return false + } +} + +func (r *Matcher) group() bool { + return exactOrWildcard(r.Rule.APIGroups, r.Attr.GetResource().Group) +} + +func (r *Matcher) version() bool { + return exactOrWildcard(r.Rule.APIVersions, r.Attr.GetResource().Version) +} + +func (r *Matcher) operation() bool { + attrOp := r.Attr.GetOperation() + for _, op := range r.Rule.Operations { + if op == v1.OperationAll { + return true + } + // The constants are the same such that this is a valid cast (and this + // is tested). 
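Editorial aside, not part of the vendored file: the resource matching further down in this file splits each rule entry on the first slash and treats "*" as a wildcard for either part. A standalone sketch of that predicate (the helper names here are invented for illustration):

package main

import (
	"fmt"
	"strings"
)

// splitRuleResource mirrors splitResource below: "pods/status" -> ("pods", "status").
func splitRuleResource(resSub string) (res, sub string) {
	parts := strings.SplitN(resSub, "/", 2)
	if len(parts) == 2 {
		return parts[0], parts[1]
	}
	return parts[0], ""
}

// ruleMatchesResource mirrors the resource() check: "*" wildcards either part.
func ruleMatchesResource(ruleResources []string, reqResource, reqSubresource string) bool {
	for _, rr := range ruleResources {
		res, sub := splitRuleResource(rr)
		if (res == "*" || res == reqResource) && (sub == "*" || sub == reqSubresource) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(ruleMatchesResource([]string{"pods/*"}, "pods", "status"))         // true
	fmt.Println(ruleMatchesResource([]string{"pods"}, "pods", "status"))           // false
	fmt.Println(ruleMatchesResource([]string{"*/scale"}, "deployments", "scale"))  // true
}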
+ if op == v1.OperationType(attrOp) { + return true + } + } + return false +} + +func splitResource(resSub string) (res, sub string) { + parts := strings.SplitN(resSub, "/", 2) + if len(parts) == 2 { + return parts[0], parts[1] + } + return parts[0], "" +} + +func (r *Matcher) resource() bool { + opRes, opSub := r.Attr.GetResource().Resource, r.Attr.GetSubresource() + for _, res := range r.Rule.Resources { + res, sub := splitResource(res) + resMatch := res == "*" || res == opRes + subMatch := sub == "*" || sub == opSub + if resMatch && subMatch { + return true + } + } + return false +} + +// IsWebhookConfigurationResource determines if an admission.Attributes object is describing +// the admission of a ValidatingWebhookConfiguration or a MutatingWebhookConfiguration +func IsWebhookConfigurationResource(attr admission.Attributes) bool { + gvk := attr.GetKind() + if gvk.Group == "admissionregistration.k8s.io" { + if gvk.Kind == "ValidatingWebhookConfiguration" || gvk.Kind == "MutatingWebhookConfiguration" { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go new file mode 100644 index 0000000000..f065cdf501 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -0,0 +1,238 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validating + +import ( + "context" + "fmt" + "sync" + "time" + + v1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + webhookrequest "k8s.io/apiserver/pkg/admission/plugin/webhook/request" + webhookutil "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/apiserver/pkg/warning" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +type validatingDispatcher struct { + cm *webhookutil.ClientManager + plugin *Plugin +} + +func newValidatingDispatcher(p *Plugin) func(cm *webhookutil.ClientManager) generic.Dispatcher { + return func(cm *webhookutil.ClientManager) generic.Dispatcher { + return &validatingDispatcher{cm, p} + } +} + +var _ generic.Dispatcher = &validatingDispatcher{} + +func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces, hooks []webhook.WebhookAccessor) error { + var relevantHooks []*generic.WebhookInvocation + // Construct all the versions we need to call our webhooks + versionedAttrs := map[schema.GroupVersionKind]*generic.VersionedAttributes{} + for _, hook := range hooks { + invocation, statusError := d.plugin.ShouldCallHook(hook, attr, o) + if statusError != nil { + return statusError + } + if invocation == nil { + continue + } + relevantHooks = append(relevantHooks, invocation) + // If we already have this version, continue + if _, ok := versionedAttrs[invocation.Kind]; ok { + continue + } + versionedAttr, err := generic.NewVersionedAttributes(attr, invocation.Kind, o) + if err != nil { + return apierrors.NewInternalError(err) + } + versionedAttrs[invocation.Kind] = versionedAttr + } + + if len(relevantHooks) == 0 { + // no matching hooks + return nil + } + + // Check if the request has already timed out before spawning remote calls + select { + case <-ctx.Done(): + // parent context is canceled or timed out, no point in continuing + return apierrors.NewTimeoutError("request did not complete within requested timeout", 0) + default: + } + + wg := sync.WaitGroup{} + errCh := make(chan error, len(relevantHooks)) + wg.Add(len(relevantHooks)) + for i := range relevantHooks { + go func(invocation *generic.WebhookInvocation) { + defer wg.Done() + hook, ok := invocation.Webhook.GetValidatingWebhook() + if !ok { + utilruntime.HandleError(fmt.Errorf("validating webhook dispatch requires v1.ValidatingWebhook, but got %T", hook)) + return + } + versionedAttr := versionedAttrs[invocation.Kind] + t := time.Now() + err := d.callHook(ctx, hook, invocation, versionedAttr) + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1.Ignore + rejected := false + if err != nil { + switch err := err.(type) { + case *webhookutil.ErrCallingWebhook: + if !ignoreClientCallFailures { + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, 0) + } + case *webhookutil.ErrWebhookRejection: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), 
admissionmetrics.WebhookRejectionNoError, int(err.Status.ErrStatus.Code)) + default: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionAPIServerInternalError, 0) + } + } + admissionmetrics.Metrics.ObserveWebhook(time.Since(t), rejected, versionedAttr.Attributes, "validating", hook.Name) + if err == nil { + return + } + + if callErr, ok := err.(*webhookutil.ErrCallingWebhook); ok { + if ignoreClientCallFailures { + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + utilruntime.HandleError(callErr) + return + } + + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + errCh <- apierrors.NewInternalError(err) + return + } + + if rejectionErr, ok := err.(*webhookutil.ErrWebhookRejection); ok { + err = rejectionErr.Status + } + klog.Warningf("rejected by webhook %q: %#v", hook.Name, err) + errCh <- err + }(relevantHooks[i]) + } + wg.Wait() + close(errCh) + + var errs []error + for e := range errCh { + errs = append(errs, e) + } + if len(errs) == 0 { + return nil + } + if len(errs) > 1 { + for i := 1; i < len(errs); i++ { + // TODO: merge status errors; until then, just return the first one. + utilruntime.HandleError(errs[i]) + } + } + return errs[0] +} + +func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes) error { + if attr.Attributes.IsDryRun() { + if h.SideEffects == nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} + } + if !(*h.SideEffects == v1.SideEffectClassNone || *h.SideEffects == v1.SideEffectClassNoneOnDryRun) { + return webhookerrors.NewDryRunUnsupportedErr(h.Name) + } + } + + uid, request, response, err := webhookrequest.CreateAdmissionObjects(attr, invocation) + if err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + // Make the webhook request + client, err := invocation.Webhook.GetRESTClient(d.cm) + if err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace := utiltrace.New("Call validating webhook", + utiltrace.Field{"configuration", invocation.Webhook.GetConfigurationName()}, + utiltrace.Field{"webhook", h.Name}, + utiltrace.Field{"resource", attr.GetResource()}, + utiltrace.Field{"subresource", attr.GetSubresource()}, + utiltrace.Field{"operation", attr.GetOperation()}, + utiltrace.Field{"UID", uid}) + defer trace.LogIfLong(500 * time.Millisecond) + + // if the webhook has a specific timeout, wrap the context to apply it + if h.TimeoutSeconds != nil { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(*h.TimeoutSeconds)*time.Second) + defer cancel() + } + + r := client.Post().Body(request) + + // if the context has a deadline, set it as a parameter to inform the backend + if deadline, hasDeadline := ctx.Deadline(); hasDeadline { + // compute the timeout + if timeout := time.Until(deadline); timeout > 0 { + // if it's not an even number of seconds, round up to the nearest second + if truncated := timeout.Truncate(time.Second); truncated != timeout { + timeout = truncated + time.Second + } + // set the timeout + r.Timeout(timeout) + } + } + + if err := r.Do(ctx).Into(response); err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace.Step("Request completed") + + 
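Editorial aside, not part of the vendored file: the Dispatch method above fans out one goroutine per matching webhook, collects failures on a channel buffered to the number of hooks, and returns the first error. A stripped-down sketch of that pattern with the webhook call stubbed out (names are illustrative):

package main

import (
	"fmt"
	"sync"
)

// dispatchAll runs every hook concurrently and surfaces only the first failure,
// mirroring the WaitGroup/buffered-channel shape used by the validating dispatcher.
func dispatchAll(hooks []string, call func(string) error) error {
	var wg sync.WaitGroup
	errCh := make(chan error, len(hooks))
	wg.Add(len(hooks))
	for _, h := range hooks {
		go func(name string) {
			defer wg.Done()
			if err := call(name); err != nil {
				errCh <- fmt.Errorf("webhook %q rejected the request: %w", name, err)
			}
		}(h)
	}
	wg.Wait()
	close(errCh)

	for err := range errCh {
		return err // like the dispatcher, return the first collected error
	}
	return nil
}

func main() {
	err := dispatchAll([]string{"a.example.com", "b.example.com"}, func(name string) error {
		if name == "b.example.com" {
			return fmt.Errorf("denied")
		}
		return nil
	})
	fmt.Println(err)
}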
result, err := webhookrequest.VerifyAdmissionResponse(uid, false, response) + if err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + + for k, v := range result.AuditAnnotations { + key := h.Name + "/" + k + if err := attr.Attributes.AddAnnotation(key, v); err != nil { + klog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, v, h.Name, err) + } + } + for _, w := range result.Warnings { + warning.AddWarning(ctx, "", w) + } + if result.Allowed { + return nil + } + return &webhookutil.ErrWebhookRejection{Status: webhookerrors.ToStatusErr(h.Name, result.Result)} +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go new file mode 100644 index 0000000000..ede53c668f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validating makes calls to validating (i.e., non-mutating) webhooks +// during the admission process. +package validating // import "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go new file mode 100644 index 0000000000..6972877b18 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validating + +import ( + "context" + "io" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/configuration" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" +) + +const ( + // PluginName indicates the name of admission plug-in + PluginName = "ValidatingAdmissionWebhook" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { + plugin, err := NewValidatingAdmissionWebhook(configFile) + if err != nil { + return nil, err + } + + return plugin, nil + }) +} + +// Plugin is an implementation of admission.Interface. +type Plugin struct { + *generic.Webhook +} + +var _ admission.ValidationInterface = &Plugin{} + +// NewValidatingAdmissionWebhook returns a generic admission webhook plugin. 
+func NewValidatingAdmissionWebhook(configFile io.Reader) (*Plugin, error) { + handler := admission.NewHandler(admission.Connect, admission.Create, admission.Delete, admission.Update) + p := &Plugin{} + var err error + p.Webhook, err = generic.NewWebhook(handler, configFile, configuration.NewValidatingWebhookConfigurationManager, newValidatingDispatcher(p)) + if err != nil { + return nil, err + } + return p, nil +} + +// Validate makes an admission decision based on the request attributes. +func (a *Plugin) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.Webhook.Dispatch(ctx, attr, o) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugins.go b/vendor/k8s.io/apiserver/pkg/admission/plugins.go new file mode 100644 index 0000000000..e6da6f4a7f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugins.go @@ -0,0 +1,208 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "reflect" + "sort" + "strings" + "sync" + + "k8s.io/klog/v2" +) + +// Factory is a function that returns an Interface for admission decisions. +// The config parameter provides an io.Reader handler to the factory in +// order to load specific configurations. If no configuration is provided +// the parameter is nil. +type Factory func(config io.Reader) (Interface, error) + +type Plugins struct { + lock sync.Mutex + registry map[string]Factory +} + +func NewPlugins() *Plugins { + return &Plugins{} +} + +// All registered admission options. +var ( + // PluginEnabledFn checks whether a plugin is enabled. By default, if you ask about it, it's enabled. + PluginEnabledFn = func(name string, config io.Reader) bool { + return true + } +) + +// PluginEnabledFunc is a function type that can provide an external check on whether an admission plugin may be enabled +type PluginEnabledFunc func(name string, config io.Reader) bool + +// Registered enumerates the names of all registered plugins. +func (ps *Plugins) Registered() []string { + ps.lock.Lock() + defer ps.lock.Unlock() + keys := []string{} + for k := range ps.registry { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// Register registers a plugin Factory by name. This +// is expected to happen during app startup. +func (ps *Plugins) Register(name string, plugin Factory) { + ps.lock.Lock() + defer ps.lock.Unlock() + if ps.registry != nil { + _, found := ps.registry[name] + if found { + klog.Fatalf("Admission plugin %q was registered twice", name) + } + } else { + ps.registry = map[string]Factory{} + } + + klog.V(1).Infof("Registered admission plugin %q", name) + ps.registry[name] = plugin +} + +// getPlugin creates an instance of the named plugin. It returns `false` if the +// the name is not known. The error is returned only when the named provider was +// known but failed to initialize. 
The config parameter specifies the io.Reader +// handler of the configuration file for the cloud provider, or nil for no configuration. +func (ps *Plugins) getPlugin(name string, config io.Reader) (Interface, bool, error) { + ps.lock.Lock() + defer ps.lock.Unlock() + f, found := ps.registry[name] + if !found { + return nil, false, nil + } + + config1, config2, err := splitStream(config) + if err != nil { + return nil, true, err + } + if !PluginEnabledFn(name, config1) { + return nil, true, nil + } + + ret, err := f(config2) + return ret, true, err +} + +// splitStream reads the stream bytes and constructs two copies of it. +func splitStream(config io.Reader) (io.Reader, io.Reader, error) { + if config == nil || reflect.ValueOf(config).IsNil() { + return nil, nil, nil + } + + configBytes, err := ioutil.ReadAll(config) + if err != nil { + return nil, nil, err + } + + return bytes.NewBuffer(configBytes), bytes.NewBuffer(configBytes), nil +} + +// NewFromPlugins returns an admission.Interface that will enforce admission control decisions of all +// the given plugins. +func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigProvider, pluginInitializer PluginInitializer, decorator Decorator) (Interface, error) { + handlers := []Interface{} + mutationPlugins := []string{} + validationPlugins := []string{} + for _, pluginName := range pluginNames { + pluginConfig, err := configProvider.ConfigFor(pluginName) + if err != nil { + return nil, err + } + + plugin, err := ps.InitPlugin(pluginName, pluginConfig, pluginInitializer) + if err != nil { + return nil, err + } + if plugin != nil { + if decorator != nil { + handlers = append(handlers, decorator.Decorate(plugin, pluginName)) + } else { + handlers = append(handlers, plugin) + } + + if _, ok := plugin.(MutationInterface); ok { + mutationPlugins = append(mutationPlugins, pluginName) + } + if _, ok := plugin.(ValidationInterface); ok { + validationPlugins = append(validationPlugins, pluginName) + } + } + } + if len(mutationPlugins) != 0 { + klog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) + } + if len(validationPlugins) != 0 { + klog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) + } + return newReinvocationHandler(chainAdmissionHandler(handlers)), nil +} + +// InitPlugin creates an instance of the named interface. +func (ps *Plugins) InitPlugin(name string, config io.Reader, pluginInitializer PluginInitializer) (Interface, error) { + if name == "" { + klog.Info("No admission plugin specified.") + return nil, nil + } + + plugin, found, err := ps.getPlugin(name, config) + if err != nil { + return nil, fmt.Errorf("couldn't init admission plugin %q: %v", name, err) + } + if !found { + return nil, fmt.Errorf("unknown admission plugin: %s", name) + } + + pluginInitializer.Initialize(plugin) + // ensure that plugins have been properly initialized + if err := ValidateInitialization(plugin); err != nil { + return nil, fmt.Errorf("failed to initialize admission plugin %q: %v", name, err) + } + + return plugin, nil +} + +// ValidateInitialization will call the InitializationValidate function in each plugin if they implement +// the InitializationValidator interface. 
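Editorial aside, not part of the vendored file: NewPlugins, Register, and Registered compose directly with the two webhook plugins vendored earlier in this patch. A short sketch:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
	"k8s.io/apiserver/pkg/admission/plugin/webhook/validating"
)

func main() {
	plugins := admission.NewPlugins()
	mutating.Register(plugins)   // registers "MutatingAdmissionWebhook"
	validating.Register(plugins) // registers "ValidatingAdmissionWebhook"

	// Registered returns the sorted names of all registered plugins.
	fmt.Println(plugins.Registered())
}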
+func ValidateInitialization(plugin Interface) error { + if validater, ok := plugin.(InitializationValidator); ok { + err := validater.ValidateInitialization() + if err != nil { + return err + } + } + return nil +} + +type PluginInitializers []PluginInitializer + +func (pp PluginInitializers) Initialize(plugin Interface) { + for _, p := range pp { + p.Initialize(plugin) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go b/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go new file mode 100644 index 0000000000..f93c703a11 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import "context" + +// newReinvocationHandler creates a handler that wraps the provided admission chain and reinvokes it +// if needed according to re-invocation policy of the webhooks. +func newReinvocationHandler(admissionChain Interface) Interface { + return &reinvoker{admissionChain} +} + +type reinvoker struct { + admissionChain Interface +} + +// Admit performs an admission control check using the wrapped admission chain, reinvoking the +// admission chain if needed according to the reinvocation policy. Plugins are expected to check +// the admission attributes' reinvocation context against their reinvocation policy to decide if +// they should re-run, and to update the reinvocation context if they perform any mutations. +func (r *reinvoker) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if mutator, ok := r.admissionChain.(MutationInterface); ok { + err := mutator.Admit(ctx, a, o) + if err != nil { + return err + } + s := a.GetReinvocationContext() + if s.ShouldReinvoke() { + s.SetIsReinvoke() + // Calling admit a second time will reinvoke all in-tree plugins + // as well as any webhook plugins that need to be reinvoked based on the + // reinvocation policy. + return mutator.Admit(ctx, a, o) + } + } + return nil +} + +// Validate performs an admission control check using the wrapped admission chain, and returns immediately on first error. +func (r *reinvoker) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if validator, ok := r.admissionChain.(ValidationInterface); ok { + return validator.Validate(ctx, a, o) + } + return nil +} + +// Handles will return true if any of the admission chain handlers handle the given operation. +func (r *reinvoker) Handles(operation Operation) bool { + return r.admissionChain.Handles(operation) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/util.go b/vendor/k8s.io/apiserver/pkg/admission/util.go new file mode 100644 index 0000000000..842932f73e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/util.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import "k8s.io/apimachinery/pkg/runtime" + +type RuntimeObjectInterfaces struct { + runtime.ObjectCreater + runtime.ObjectTyper + runtime.ObjectDefaulter + runtime.ObjectConvertor + runtime.EquivalentResourceMapper +} + +func NewObjectInterfacesFromScheme(scheme *runtime.Scheme) ObjectInterfaces { + return &RuntimeObjectInterfaces{scheme, scheme, scheme, scheme, runtime.NewEquivalentResourceRegistry()} +} + +func (r *RuntimeObjectInterfaces) GetObjectCreater() runtime.ObjectCreater { + return r.ObjectCreater +} +func (r *RuntimeObjectInterfaces) GetObjectTyper() runtime.ObjectTyper { + return r.ObjectTyper +} +func (r *RuntimeObjectInterfaces) GetObjectDefaulter() runtime.ObjectDefaulter { + return r.ObjectDefaulter +} +func (r *RuntimeObjectInterfaces) GetObjectConvertor() runtime.ObjectConvertor { + return r.ObjectConvertor +} +func (r *RuntimeObjectInterfaces) GetEquivalentResourceMapper() runtime.EquivalentResourceMapper { + return r.EquivalentResourceMapper +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go new file mode 100644 index 0000000000..88db1ffa67 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=apiserver.k8s.io + +// Package apiserver is the internal version of the API. +package apiserver // import "k8s.io/apiserver/pkg/apis/apiserver" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go new file mode 100644 index 0000000000..8a8202cb5b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package install + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/apis/apiserver" + v1 "k8s.io/apiserver/pkg/apis/apiserver/v1" + "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" + "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" +) + +// Install registers the API group and adds types to a scheme +func Install(scheme *runtime.Scheme) { + utilruntime.Must(apiserver.AddToScheme(scheme)) + + // v1alpha is in the k8s.io-suffixed API group + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion)) + + // v1alpha is in the k8s.io-suffixed API group + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion)) + + // v1 is in the config.k8s.io-suffixed API group + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go new file mode 100644 index 0000000000..7466f45460 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const LegacyGroupName = "apiserver.k8s.io" +const GroupName = "apiserver.config.k8s.io" + +// LegacySchemeGroupVersion is group version used to register these objects +var LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: runtime.APIVersionInternal} + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &AdmissionConfiguration{}, + &EgressSelectorConfiguration{}, + ) + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionConfiguration{}, + &EgressSelectorConfiguration{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go new file mode 100644 index 0000000000..197ce65373 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -0,0 +1,149 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
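Install above wires every version of this config API (the internal form plus v1alpha1, v1beta1 and v1) into one scheme and sets their priorities, so a single codec factory can decode whichever apiVersion a config file happens to use. A sketch of the typical wiring, assuming serializer.NewCodecFactory from k8s.io/apimachinery (not part of this patch):

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apiserver/pkg/apis/apiserver/install"
)

// newConfigCodecs builds a scheme with all apiserver config versions installed
// and a codec factory capable of decoding any of them (sketch only).
func newConfigCodecs() (*runtime.Scheme, serializer.CodecFactory) {
	scheme := runtime.NewScheme()
	install.Install(scheme)
	return scheme, serializer.NewCodecFactory(scheme)
}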
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionConfiguration provides versioned configuration for admission controllers. +type AdmissionConfiguration struct { + metav1.TypeMeta + + // Plugins allows specifying a configuration per admission control plugin. + // +optional + Plugins []AdmissionPluginConfiguration +} + +// AdmissionPluginConfiguration provides the configuration for a single plug-in. +type AdmissionPluginConfiguration struct { + // Name is the name of the admission controller. + // It must match the registered admission plugin name. + Name string + + // Path is the path to a configuration file that contains the plugin's + // configuration + // +optional + Path string + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +optional + Configuration *runtime.Unknown +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressSelectorConfiguration provides versioned configuration for egress selector clients. +type EgressSelectorConfiguration struct { + metav1.TypeMeta + + // EgressSelections contains a list of egress selection client configurations + EgressSelections []EgressSelection +} + +// EgressSelection provides the configuration for a single egress selection client. +type EgressSelection struct { + // Name is the name of the egress selection. + // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" + Name string + + // Connection is the exact information used to configure the egress selection + Connection Connection +} + +// Connection provides the configuration for a single egress selection client. +type Connection struct { + // Protocol is the protocol used to connect from client to the konnectivity server. + ProxyProtocol ProtocolType + + // Transport defines the transport configurations we use to dial to the konnectivity server. + // This is required if ProxyProtocol is HTTPConnect or GRPC. 
+ // +optional + Transport *Transport +} + +// ProtocolType is a set of valid values for Connection.ProtocolType +type ProtocolType string + +// Valid types for ProtocolType for konnectivity server +const ( + // Use HTTPConnect to connect to konnectivity server + ProtocolHTTPConnect ProtocolType = "HTTPConnect" + // Use grpc to connect to konnectivity server + ProtocolGRPC ProtocolType = "GRPC" + // Connect directly (skip konnectivity server) + ProtocolDirect ProtocolType = "Direct" +) + +// Transport defines the transport configurations we use to dial to the konnectivity server +type Transport struct { + // TCP is the TCP configuration for communicating with the konnectivity server via TCP + // ProxyProtocol of GRPC is not supported with TCP transport at the moment + // Requires at least one of TCP or UDS to be set + // +optional + TCP *TCPTransport + + // UDS is the UDS configuration for communicating with the konnectivity server via UDS + // Requires at least one of TCP or UDS to be set + // +optional + UDS *UDSTransport +} + +// TCPTransport provides the information to connect to konnectivity server via TCP +type TCPTransport struct { + // URL is the location of the konnectivity server to connect to. + // As an example it might be "https://127.0.0.1:8131" + URL string + + // TLSConfig is the config needed to use TLS when connecting to konnectivity server + // +optional + TLSConfig *TLSConfig +} + +// UDSTransport provides the information to connect to konnectivity server via UDS +type UDSTransport struct { + // UDSName is the name of the unix domain socket to connect to konnectivity server + // This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket) + UDSName string +} + +// TLSConfig provides the authentication information to connect to konnectivity server +// Only used with TCPTransport +type TLSConfig struct { + // caBundle is the file location of the CA to be used to determine trust with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // If absent while TCPTransport.URL is prefixed with https://, default to system trust roots. + // +optional + CABundle string + + // clientKey is the file location of the client key to authenticate with the konnectivity server + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientKey string + + // clientCert is the file location of the client certificate to authenticate with the konnectivity server + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientCert string +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go new file mode 100644 index 0000000000..91458aee4e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
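The internal egress selector types above are normally produced by converting a versioned config file, but constructing them directly shows the shape: each named selection ("cluster", "controlplane", "etcd", ...) binds to one Connection, which picks a protocol and, unless it is Direct, a transport. A sketch with illustrative values (the socket path is a placeholder):

package example

import (
	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
)

// exampleEgressConfig routes "cluster" traffic through a konnectivity server
// over a unix domain socket and lets "etcd" traffic dial out directly.
func exampleEgressConfig() apiserver.EgressSelectorConfiguration {
	return apiserver.EgressSelectorConfiguration{
		EgressSelections: []apiserver.EgressSelection{
			{
				Name: "cluster",
				Connection: apiserver.Connection{
					ProxyProtocol: apiserver.ProtocolGRPC,
					Transport: &apiserver.Transport{
						UDS: &apiserver.UDSTransport{
							UDSName: "/etc/kubernetes/konnectivity-server/konnectivity-server.socket",
						},
					},
				},
			},
			{
				Name: "etcd",
				Connection: apiserver.Connection{
					ProxyProtocol: apiserver.ProtocolDirect,
				},
			},
		},
	}
}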
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1 is the v1 version of the API. +package v1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go new file mode 100644 index 0000000000..8d3bf987f9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionConfiguration{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go new file mode 100644 index 0000000000..e139dceb9d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionConfiguration provides versioned configuration for admission controllers. 
+type AdmissionConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // Plugins allows specifying a configuration per admission control plugin. + // +optional + Plugins []AdmissionPluginConfiguration `json:"plugins"` +} + +// AdmissionPluginConfiguration provides the configuration for a single plug-in. +type AdmissionPluginConfiguration struct { + // Name is the name of the admission controller. + // It must match the registered admission plugin name. + Name string `json:"name"` + + // Path is the path to a configuration file that contains the plugin's + // configuration + // +optional + Path string `json:"path"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +optional + Configuration *runtime.Unknown `json:"configuration"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..5116139ae9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go @@ -0,0 +1,103 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
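The v1 types above are the externally facing shape of the admission configuration; a file passed to the API server's admission-control config file flag decodes into exactly this struct. A sketch that builds the same object in code (the plugin name and path are illustrative values, not defaults):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
)

// exampleAdmissionConfig configures one admission plugin from a file on disk.
func exampleAdmissionConfig() apiserverv1.AdmissionConfiguration {
	return apiserverv1.AdmissionConfiguration{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apiserver.config.k8s.io/v1",
			Kind:       "AdmissionConfiguration",
		},
		Plugins: []apiserverv1.AdmissionPluginConfiguration{
			{Name: "EventRateLimit", Path: "/etc/kubernetes/eventratelimit.yaml"},
		},
	}
}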
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AdmissionConfiguration)(nil), (*apiserver.AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(a.(*AdmissionConfiguration), b.(*apiserver.AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionConfiguration)(nil), (*AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(a.(*apiserver.AdmissionConfiguration), b.(*AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionPluginConfiguration)(nil), (*apiserver.AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(a.(*AdmissionPluginConfiguration), b.(*apiserver.AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionPluginConfiguration)(nil), (*AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(a.(*apiserver.AdmissionPluginConfiguration), b.(*AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function. +func Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in, out, s) +} + +func autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function. 
+func Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a0e039de0d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go @@ -0,0 +1,78 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]AdmissionPluginConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration. +func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration { + if in == nil { + return nil + } + out := new(AdmissionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
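These generated converters are usually invoked indirectly through scheme.Convert, but they are exported and can be called directly, for example when turning a decoded v1 object into the internal form. A sketch; passing a nil conversion scope is acceptable here because these particular converters never consult it:

package example

import (
	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
	apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
)

// toInternal converts a decoded v1 AdmissionConfiguration into the internal
// representation by calling the generated conversion function directly.
func toInternal(in *apiserverv1.AdmissionConfiguration) (*apiserver.AdmissionConfiguration, error) {
	out := &apiserver.AdmissionConfiguration{}
	if err := apiserverv1.Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}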
+func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration. +func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration { + if in == nil { + return nil + } + out := new(AdmissionPluginConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..cce2e603a6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go new file mode 100644 index 0000000000..82ebd0c455 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go new file mode 100644 index 0000000000..758be628a7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
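The value of the generated deep copies is that the Plugins slice and the embedded runtime.Unknown configuration are duplicated rather than aliased, so a copy can be mutated without touching the original. A small sketch (the override path is a placeholder):

package example

import (
	apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
)

// mutateCopy edits a deep copy; the caller's object is left untouched because
// DeepCopy duplicates the Plugins slice and its embedded Configuration pointer.
func mutateCopy(in *apiserverv1.AdmissionConfiguration) *apiserverv1.AdmissionConfiguration {
	out := in.DeepCopy()
	if len(out.Plugins) > 0 {
		out.Plugins[0].Path = "/tmp/override.yaml" // in.Plugins[0].Path is unchanged
	}
	return out
}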
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionConfiguration{}, + &EgressSelectorConfiguration{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go new file mode 100644 index 0000000000..49509bf22c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -0,0 +1,149 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionConfiguration provides versioned configuration for admission controllers. +type AdmissionConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // Plugins allows specifying a configuration per admission control plugin. + // +optional + Plugins []AdmissionPluginConfiguration `json:"plugins"` +} + +// AdmissionPluginConfiguration provides the configuration for a single plug-in. +type AdmissionPluginConfiguration struct { + // Name is the name of the admission controller. + // It must match the registered admission plugin name. + Name string `json:"name"` + + // Path is the path to a configuration file that contains the plugin's + // configuration + // +optional + Path string `json:"path"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. 
If present, it will be used instead of the path to the configuration file. + // +optional + Configuration *runtime.Unknown `json:"configuration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressSelectorConfiguration provides versioned configuration for egress selector clients. +type EgressSelectorConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // connectionServices contains a list of egress selection client configurations + EgressSelections []EgressSelection `json:"egressSelections"` +} + +// EgressSelection provides the configuration for a single egress selection client. +type EgressSelection struct { + // name is the name of the egress selection. + // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" + Name string `json:"name"` + + // connection is the exact information used to configure the egress selection + Connection Connection `json:"connection"` +} + +// Connection provides the configuration for a single egress selection client. +type Connection struct { + // Protocol is the protocol used to connect from client to the konnectivity server. + ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"` + + // Transport defines the transport configurations we use to dial to the konnectivity server. + // This is required if ProxyProtocol is HTTPConnect or GRPC. + // +optional + Transport *Transport `json:"transport,omitempty"` +} + +// ProtocolType is a set of valid values for Connection.ProtocolType +type ProtocolType string + +// Valid types for ProtocolType for konnectivity server +const ( + // Use HTTPConnect to connect to konnectivity server + ProtocolHTTPConnect ProtocolType = "HTTPConnect" + // Use grpc to connect to konnectivity server + ProtocolGRPC ProtocolType = "GRPC" + // Connect directly (skip konnectivity server) + ProtocolDirect ProtocolType = "Direct" +) + +// Transport defines the transport configurations we use to dial to the konnectivity server +type Transport struct { + // TCP is the TCP configuration for communicating with the konnectivity server via TCP + // ProxyProtocol of GRPC is not supported with TCP transport at the moment + // Requires at least one of TCP or UDS to be set + // +optional + TCP *TCPTransport `json:"tcp,omitempty"` + + // UDS is the UDS configuration for communicating with the konnectivity server via UDS + // Requires at least one of TCP or UDS to be set + // +optional + UDS *UDSTransport `json:"uds,omitempty"` +} + +// TCPTransport provides the information to connect to konnectivity server via TCP +type TCPTransport struct { + // URL is the location of the konnectivity server to connect to. + // As an example it might be "https://127.0.0.1:8131" + URL string `json:"url,omitempty"` + + // TLSConfig is the config needed to use TLS when connecting to konnectivity server + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` +} + +// UDSTransport provides the information to connect to konnectivity server via UDS +type UDSTransport struct { + // UDSName is the name of the unix domain socket to connect to konnectivity server + // This does not use a unix:// prefix. 
(Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket) + UDSName string `json:"udsName,omitempty"` +} + +// TLSConfig provides the authentication information to connect to konnectivity server +// Only used with TCPTransport +type TLSConfig struct { + // caBundle is the file location of the CA to be used to determine trust with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // If absent while TCPTransport.URL is prefixed with https://, default to system trust roots. + // +optional + CABundle string `json:"caBundle,omitempty"` + + // clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientKey string `json:"clientKey,omitempty"` + + // clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientCert string `json:"clientCert,omitempty"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..9174b16df7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,329 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
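As a counterpart to the UDS example shown earlier for the internal types, here is the TCP/TLS variant expressed with the v1alpha1 types: an HTTPConnect proxy reached over https and authenticated with a client certificate. The URL and all file paths are placeholders.

package example

import (
	"k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
)

// tcpEgressSelection configures a "cluster" egress selection that reaches the
// konnectivity server over TCP with mutual TLS (illustrative values only).
func tcpEgressSelection() v1alpha1.EgressSelection {
	return v1alpha1.EgressSelection{
		Name: "cluster",
		Connection: v1alpha1.Connection{
			ProxyProtocol: v1alpha1.ProtocolHTTPConnect,
			Transport: &v1alpha1.Transport{
				TCP: &v1alpha1.TCPTransport{
					URL: "https://127.0.0.1:8131",
					TLSConfig: &v1alpha1.TLSConfig{
						CABundle:   "/etc/kubernetes/pki/konnectivity-ca.crt",
						ClientCert: "/etc/kubernetes/pki/konnectivity-client.crt",
						ClientKey:  "/etc/kubernetes/pki/konnectivity-client.key",
					},
				},
			},
		},
	}
}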
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AdmissionConfiguration)(nil), (*apiserver.AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(a.(*AdmissionConfiguration), b.(*apiserver.AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionConfiguration)(nil), (*AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(a.(*apiserver.AdmissionConfiguration), b.(*AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionPluginConfiguration)(nil), (*apiserver.AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(a.(*AdmissionPluginConfiguration), b.(*apiserver.AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionPluginConfiguration)(nil), (*AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(a.(*apiserver.AdmissionPluginConfiguration), b.(*AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Connection)(nil), (*Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Connection_To_v1alpha1_Connection(a.(*apiserver.Connection), b.(*Connection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EgressSelectorConfiguration)(nil), (*apiserver.EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(a.(*EgressSelectorConfiguration), b.(*apiserver.EgressSelectorConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelectorConfiguration)(nil), (*EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(a.(*apiserver.EgressSelectorConfiguration), 
b.(*EgressSelectorConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.TCPTransport)(nil), (*TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(a.(*apiserver.TCPTransport), b.(*TCPTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TLSConfig)(nil), (*apiserver.TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(a.(*TLSConfig), b.(*apiserver.TLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.TLSConfig)(nil), (*TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(a.(*apiserver.TLSConfig), b.(*TLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Transport)(nil), (*apiserver.Transport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Transport_To_apiserver_Transport(a.(*Transport), b.(*apiserver.Transport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Transport)(nil), (*Transport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Transport_To_v1alpha1_Transport(a.(*apiserver.Transport), b.(*Transport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*UDSTransport)(nil), (*apiserver.UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(a.(*UDSTransport), b.(*apiserver.UDSTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.UDSTransport)(nil), (*UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(a.(*apiserver.UDSTransport), b.(*UDSTransport), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error { + out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol) + out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport)) + return nil +} + +// Convert_v1alpha1_Connection_To_apiserver_Connection is an autogenerated conversion function. +func Convert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error { + return autoConvert_v1alpha1_Connection_To_apiserver_Connection(in, out, s) +} + +func autoConvert_apiserver_Connection_To_v1alpha1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error { + out.ProxyProtocol = ProtocolType(in.ProxyProtocol) + out.Transport = (*Transport)(unsafe.Pointer(in.Transport)) + return nil +} + +// Convert_apiserver_Connection_To_v1alpha1_Connection is an autogenerated conversion function. 
+func Convert_apiserver_Connection_To_v1alpha1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error { + return autoConvert_apiserver_Connection_To_v1alpha1_Connection(in, out, s) +} + +func autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1alpha1_Connection_To_apiserver_Connection(&in.Connection, &out.Connection, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection is an autogenerated conversion function. +func Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + return autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in, out, s) +} + +func autoConvert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_apiserver_Connection_To_v1alpha1_Connection(&in.Connection, &out.Connection, s); err != nil { + return err + } + return nil +} + +// Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection is an autogenerated conversion function. +func Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error { + return autoConvert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in, out, s) +} + +func autoConvert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error { + out.EgressSelections = *(*[]apiserver.EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + return nil +} + +// Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in, out, s) +} + +func autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error { + out.EgressSelections = *(*[]EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + return nil +} + +// Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration is an autogenerated conversion function. +func Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error { + out.URL = in.URL + out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig)) + return nil +} + +// Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport is an autogenerated conversion function. 
+func Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error { + return autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in, out, s) +} + +func autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error { + out.URL = in.URL + out.TLSConfig = (*TLSConfig)(unsafe.Pointer(in.TLSConfig)) + return nil +} + +// Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport is an autogenerated conversion function. +func Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error { + return autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in, out, s) +} + +func autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error { + out.CABundle = in.CABundle + out.ClientKey = in.ClientKey + out.ClientCert = in.ClientCert + return nil +} + +// Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig is an autogenerated conversion function. +func Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in, out, s) +} + +func autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error { + out.CABundle = in.CABundle + out.ClientKey = in.ClientKey + out.ClientCert = in.ClientCert + return nil +} + +// Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig is an autogenerated conversion function. +func Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error { + return autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in, out, s) +} + +func autoConvert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error { + out.TCP = (*apiserver.TCPTransport)(unsafe.Pointer(in.TCP)) + out.UDS = (*apiserver.UDSTransport)(unsafe.Pointer(in.UDS)) + return nil +} + +// Convert_v1alpha1_Transport_To_apiserver_Transport is an autogenerated conversion function. +func Convert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error { + return autoConvert_v1alpha1_Transport_To_apiserver_Transport(in, out, s) +} + +func autoConvert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error { + out.TCP = (*TCPTransport)(unsafe.Pointer(in.TCP)) + out.UDS = (*UDSTransport)(unsafe.Pointer(in.UDS)) + return nil +} + +// Convert_apiserver_Transport_To_v1alpha1_Transport is an autogenerated conversion function. +func Convert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error { + return autoConvert_apiserver_Transport_To_v1alpha1_Transport(in, out, s) +} + +func autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error { + out.UDSName = in.UDSName + return nil +} + +// Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport is an autogenerated conversion function. 
+func Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error { + return autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in, out, s) +} + +func autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error { + out.UDSName = in.UDSName + return nil +} + +// Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport is an autogenerated conversion function. +func Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error { + return autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4498e408f0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]AdmissionPluginConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration. +func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration { + if in == nil { + return nil + } + out := new(AdmissionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration. 
+func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration { + if in == nil { + return nil + } + out := new(AdmissionPluginConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connection) DeepCopyInto(out *Connection) { + *out = *in + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(Transport) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. +func (in *Connection) DeepCopy() *Connection { + if in == nil { + return nil + } + out := new(Connection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelection) DeepCopyInto(out *EgressSelection) { + *out = *in + in.Connection.DeepCopyInto(&out.Connection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection. +func (in *EgressSelection) DeepCopy() *EgressSelection { + if in == nil { + return nil + } + out := new(EgressSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]EgressSelection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration. +func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration { + if in == nil { + return nil + } + out := new(EgressSelectorConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport. +func (in *TCPTransport) DeepCopy() *TCPTransport { + if in == nil { + return nil + } + out := new(TCPTransport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Transport) DeepCopyInto(out *Transport) { + *out = *in + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TCPTransport) + (*in).DeepCopyInto(*out) + } + if in.UDS != nil { + in, out := &in.UDS, &out.UDS + *out = new(UDSTransport) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport. +func (in *Transport) DeepCopy() *Transport { + if in == nil { + return nil + } + out := new(Transport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDSTransport) DeepCopyInto(out *UDSTransport) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport. +func (in *UDSTransport) DeepCopy() *UDSTransport { + if in == nil { + return nil + } + out := new(UDSTransport) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go new file mode 100644 index 0000000000..dd621a3acd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go new file mode 100644 index 0000000000..07321e775d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.k8s.io + +// Package v1beta1 is the v1beta1 version of the API. 
+package v1beta1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go new file mode 100644 index 0000000000..9ea529472f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EgressSelectorConfiguration{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go new file mode 100644 index 0000000000..ea22b403a3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go @@ -0,0 +1,120 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressSelectorConfiguration provides versioned configuration for egress selector clients. 
+type EgressSelectorConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // connectionServices contains a list of egress selection client configurations + EgressSelections []EgressSelection `json:"egressSelections"` +} + +// EgressSelection provides the configuration for a single egress selection client. +type EgressSelection struct { + // name is the name of the egress selection. + // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" + Name string `json:"name"` + + // connection is the exact information used to configure the egress selection + Connection Connection `json:"connection"` +} + +// Connection provides the configuration for a single egress selection client. +type Connection struct { + // Protocol is the protocol used to connect from client to the konnectivity server. + ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"` + + // Transport defines the transport configurations we use to dial to the konnectivity server. + // This is required if ProxyProtocol is HTTPConnect or GRPC. + // +optional + Transport *Transport `json:"transport,omitempty"` +} + +// ProtocolType is a set of valid values for Connection.ProtocolType +type ProtocolType string + +// Valid types for ProtocolType for konnectivity server +const ( + // Use HTTPConnect to connect to konnectivity server + ProtocolHTTPConnect ProtocolType = "HTTPConnect" + // Use grpc to connect to konnectivity server + ProtocolGRPC ProtocolType = "GRPC" + // Connect directly (skip konnectivity server) + ProtocolDirect ProtocolType = "Direct" +) + +// Transport defines the transport configurations we use to dial to the konnectivity server +type Transport struct { + // TCP is the TCP configuration for communicating with the konnectivity server via TCP + // ProxyProtocol of GRPC is not supported with TCP transport at the moment + // Requires at least one of TCP or UDS to be set + // +optional + TCP *TCPTransport `json:"tcp,omitempty"` + + // UDS is the UDS configuration for communicating with the konnectivity server via UDS + // Requires at least one of TCP or UDS to be set + // +optional + UDS *UDSTransport `json:"uds,omitempty"` +} + +// TCPTransport provides the information to connect to konnectivity server via TCP +type TCPTransport struct { + // URL is the location of the konnectivity server to connect to. + // As an example it might be "https://127.0.0.1:8131" + URL string `json:"url,omitempty"` + + // TLSConfig is the config needed to use TLS when connecting to konnectivity server + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` +} + +// UDSTransport provides the information to connect to konnectivity server via UDS +type UDSTransport struct { + // UDSName is the name of the unix domain socket to connect to konnectivity server + // This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket) + UDSName string `json:"udsName,omitempty"` +} + +// TLSConfig provides the authentication information to connect to konnectivity server +// Only used with TCPTransport +type TLSConfig struct { + // caBundle is the file location of the CA to be used to determine trust with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // If absent while TCPTransport.URL is prefixed with https://, default to system trust roots. 
+ // +optional + CABundle string `json:"caBundle,omitempty"` + + // clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientKey string `json:"clientKey,omitempty"` + + // clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientCert string `json:"clientCert,omitempty"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..b0e44b7e16 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go @@ -0,0 +1,265 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Connection)(nil), (*Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Connection_To_v1beta1_Connection(a.(*apiserver.Connection), b.(*Connection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EgressSelectorConfiguration)(nil), (*apiserver.EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(a.(*EgressSelectorConfiguration), b.(*apiserver.EgressSelectorConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelectorConfiguration)(nil), (*EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(a.(*apiserver.EgressSelectorConfiguration), b.(*EgressSelectorConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.TCPTransport)(nil), (*TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport(a.(*apiserver.TCPTransport), b.(*TCPTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TLSConfig)(nil), (*apiserver.TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig(a.(*TLSConfig), b.(*apiserver.TLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.TLSConfig)(nil), (*TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig(a.(*apiserver.TLSConfig), b.(*TLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Transport)(nil), (*apiserver.Transport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Transport_To_apiserver_Transport(a.(*Transport), b.(*apiserver.Transport), scope) + }); err != nil { + return err + } + if err 
:= s.AddGeneratedConversionFunc((*apiserver.Transport)(nil), (*Transport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Transport_To_v1beta1_Transport(a.(*apiserver.Transport), b.(*Transport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*UDSTransport)(nil), (*apiserver.UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport(a.(*UDSTransport), b.(*apiserver.UDSTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.UDSTransport)(nil), (*UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport(a.(*apiserver.UDSTransport), b.(*UDSTransport), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error { + out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol) + out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport)) + return nil +} + +// Convert_v1beta1_Connection_To_apiserver_Connection is an autogenerated conversion function. +func Convert_v1beta1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error { + return autoConvert_v1beta1_Connection_To_apiserver_Connection(in, out, s) +} + +func autoConvert_apiserver_Connection_To_v1beta1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error { + out.ProxyProtocol = ProtocolType(in.ProxyProtocol) + out.Transport = (*Transport)(unsafe.Pointer(in.Transport)) + return nil +} + +// Convert_apiserver_Connection_To_v1beta1_Connection is an autogenerated conversion function. +func Convert_apiserver_Connection_To_v1beta1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error { + return autoConvert_apiserver_Connection_To_v1beta1_Connection(in, out, s) +} + +func autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1beta1_Connection_To_apiserver_Connection(&in.Connection, &out.Connection, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection is an autogenerated conversion function. +func Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + return autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in, out, s) +} + +func autoConvert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_apiserver_Connection_To_v1beta1_Connection(&in.Connection, &out.Connection, s); err != nil { + return err + } + return nil +} + +// Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection is an autogenerated conversion function. 
+func Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error { + return autoConvert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in, out, s) +} + +func autoConvert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error { + out.EgressSelections = *(*[]apiserver.EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + return nil +} + +// Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration is an autogenerated conversion function. +func Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error { + return autoConvert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in, out, s) +} + +func autoConvert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error { + out.EgressSelections = *(*[]EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + return nil +} + +// Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration is an autogenerated conversion function. +func Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in, out, s) +} + +func autoConvert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error { + out.URL = in.URL + out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig)) + return nil +} + +// Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport is an autogenerated conversion function. +func Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error { + return autoConvert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in, out, s) +} + +func autoConvert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error { + out.URL = in.URL + out.TLSConfig = (*TLSConfig)(unsafe.Pointer(in.TLSConfig)) + return nil +} + +// Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport is an autogenerated conversion function. +func Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error { + return autoConvert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in, out, s) +} + +func autoConvert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error { + out.CABundle = in.CABundle + out.ClientKey = in.ClientKey + out.ClientCert = in.ClientCert + return nil +} + +// Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig is an autogenerated conversion function. 
+func Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error { + return autoConvert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in, out, s) +} + +func autoConvert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error { + out.CABundle = in.CABundle + out.ClientKey = in.ClientKey + out.ClientCert = in.ClientCert + return nil +} + +// Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig is an autogenerated conversion function. +func Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error { + return autoConvert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in, out, s) +} + +func autoConvert_v1beta1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error { + out.TCP = (*apiserver.TCPTransport)(unsafe.Pointer(in.TCP)) + out.UDS = (*apiserver.UDSTransport)(unsafe.Pointer(in.UDS)) + return nil +} + +// Convert_v1beta1_Transport_To_apiserver_Transport is an autogenerated conversion function. +func Convert_v1beta1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error { + return autoConvert_v1beta1_Transport_To_apiserver_Transport(in, out, s) +} + +func autoConvert_apiserver_Transport_To_v1beta1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error { + out.TCP = (*TCPTransport)(unsafe.Pointer(in.TCP)) + out.UDS = (*UDSTransport)(unsafe.Pointer(in.UDS)) + return nil +} + +// Convert_apiserver_Transport_To_v1beta1_Transport is an autogenerated conversion function. +func Convert_apiserver_Transport_To_v1beta1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error { + return autoConvert_apiserver_Transport_To_v1beta1_Transport(in, out, s) +} + +func autoConvert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error { + out.UDSName = in.UDSName + return nil +} + +// Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport is an autogenerated conversion function. +func Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error { + return autoConvert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in, out, s) +} + +func autoConvert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error { + out.UDSName = in.UDSName + return nil +} + +// Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport is an autogenerated conversion function. +func Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error { + return autoConvert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..eda0eaffc4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,174 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connection) DeepCopyInto(out *Connection) { + *out = *in + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(Transport) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. +func (in *Connection) DeepCopy() *Connection { + if in == nil { + return nil + } + out := new(Connection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelection) DeepCopyInto(out *EgressSelection) { + *out = *in + in.Connection.DeepCopyInto(&out.Connection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection. +func (in *EgressSelection) DeepCopy() *EgressSelection { + if in == nil { + return nil + } + out := new(EgressSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]EgressSelection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration. +func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration { + if in == nil { + return nil + } + out := new(EgressSelectorConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport. +func (in *TCPTransport) DeepCopy() *TCPTransport { + if in == nil { + return nil + } + out := new(TCPTransport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. 
+func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Transport) DeepCopyInto(out *Transport) { + *out = *in + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TCPTransport) + (*in).DeepCopyInto(*out) + } + if in.UDS != nil { + in, out := &in.UDS, &out.UDS + *out = new(UDSTransport) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport. +func (in *Transport) DeepCopy() *Transport { + if in == nil { + return nil + } + out := new(Transport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDSTransport) DeepCopyInto(out *UDSTransport) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport. +func (in *UDSTransport) DeepCopy() *UDSTransport { + if in == nil { + return nil + } + out := new(UDSTransport) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..73e63fc114 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go new file mode 100644 index 0000000000..622f1b5dc8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. 
DO NOT EDIT. + +package apiserver + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]AdmissionPluginConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration. +func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration { + if in == nil { + return nil + } + out := new(AdmissionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration. +func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration { + if in == nil { + return nil + } + out := new(AdmissionPluginConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connection) DeepCopyInto(out *Connection) { + *out = *in + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(Transport) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. +func (in *Connection) DeepCopy() *Connection { + if in == nil { + return nil + } + out := new(Connection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelection) DeepCopyInto(out *EgressSelection) { + *out = *in + in.Connection.DeepCopyInto(&out.Connection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection. +func (in *EgressSelection) DeepCopy() *EgressSelection { + if in == nil { + return nil + } + out := new(EgressSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]EgressSelection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration. 
+func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration { + if in == nil { + return nil + } + out := new(EgressSelectorConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport. +func (in *TCPTransport) DeepCopy() *TCPTransport { + if in == nil { + return nil + } + out := new(TCPTransport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Transport) DeepCopyInto(out *Transport) { + *out = *in + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TCPTransport) + (*in).DeepCopyInto(*out) + } + if in.UDS != nil { + in, out := &in.UDS, &out.UDS + *out = new(UDSTransport) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport. +func (in *Transport) DeepCopy() *Transport { + if in == nil { + return nil + } + out := new(Transport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDSTransport) DeepCopyInto(out *UDSTransport) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport. +func (in *UDSTransport) DeepCopy() *UDSTransport { + if in == nil { + return nil + } + out := new(UDSTransport) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS b/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS new file mode 100644 index 0000000000..7a21104f6d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-audit-approvers +- sig-auth-audit-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go new file mode 100644 index 0000000000..deda9cbd63 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=audit.k8s.io + +package audit // import "k8s.io/apiserver/pkg/apis/audit" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go b/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go new file mode 100644 index 0000000000..05fe72c0ff --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +func ordLevel(l Level) int { + switch l { + case LevelMetadata: + return 1 + case LevelRequest: + return 2 + case LevelRequestResponse: + return 3 + default: + return 0 + } +} + +func (a Level) Less(b Level) bool { + return ordLevel(a) < ordLevel(b) +} + +func (a Level) GreaterOrEqual(b Level) bool { + return ordLevel(a) >= ordLevel(b) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go b/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go new file mode 100644 index 0000000000..6e7d5bc824 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/v1" + "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + "k8s.io/apiserver/pkg/apis/audit/v1beta1" +) + +// Install registers the API group and adds types to a scheme +func Install(scheme *runtime.Scheme) { + utilruntime.Must(audit.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/register.go b/vendor/k8s.io/apiserver/pkg/apis/audit/register.go new file mode 100644 index 0000000000..9abf739ae0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "audit.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + &Policy{}, + &PolicyList{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go new file mode 100644 index 0000000000..b497be6df2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go @@ -0,0 +1,286 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package audit + +import ( + authnv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// Header keys used by the audit system. +const ( + // Header to hold the audit ID as the request is propagated through the serving hierarchy. The + // Audit-ID header should be set by the first server to receive the request (e.g. the federation + // server or kube-aggregator). + // + // Audit ID is also returned to client by http response header. + // It's not guaranteed Audit-Id http header is sent for all requests. When kube-apiserver didn't + // audit the events according to the audit policy, no Audit-ID is returned. Also, for request to + // pods/exec, pods/attach, pods/proxy, kube-apiserver works like a proxy and redirect the request + // to kubelet node, users will only get http headers sent from kubelet node, so no Audit-ID is + // sent when users run command like "kubectl exec" or "kubectl attach". + HeaderAuditID = "Audit-ID" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling that audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated as soon as the audit handler receives the request, and before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated once the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated once the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event captures all the information that can be included in an API audit log. +type Event struct { + metav1.TypeMeta + + // AuditLevel at which event was generated + Level Level + + // Unique audit ID, generated for each request. + AuditID types.UID + // Stage of the request handling when this event instance was generated. + Stage Stage + + // RequestURI is the request URI as sent by the client to a server. + RequestURI string + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. + Verb string + // Authenticated user information. + User authnv1.UserInfo + // Impersonated user information. + // +optional + ImpersonatedUser *authnv1.UserInfo + // Source IPs, from where the request originated and intermediate proxies. + // +optional + SourceIPs []string + // UserAgent records the user agent string reported by the client. 
+ // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + UserAgent string + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. + // +optional + ObjectRef *ObjectReference + // The response status, populated even when the ResponseObject is not a Status type. + // For successful responses, this will only include the Code. For non-status type + // error responses, this will be auto-populated with the error Message. + // +optional + ResponseStatus *metav1.Status + + // API object from the request, in JSON format. The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. + // +optional + RequestObject *runtime.Unknown + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + ResponseObject *runtime.Unknown + + // Time the request reached the apiserver. + RequestReceivedTimestamp metav1.MicroTime + // Time the request reached current audit stage. + StageTimestamp metav1.MicroTime + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + Annotations map[string]string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of audit Events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +type Policy struct { + metav1.TypeMeta + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + metav1.ObjectMeta + + // Rules specify the audit Level a request should be recorded at. + // A request may match multiple rules, in which case the FIRST matching rule is used. + // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + Rules []PolicyRule + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + OmitStages []Stage +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PolicyList is a list of audit Policies. +type PolicyList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Policy +} + +// PolicyRule maps requests based off metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). 
+type PolicyRule struct { + // The Level that requests matching this rule are recorded at. + Level Level + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + Users []string + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + UserGroups []string + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + Verbs []string + + // Rules can apply to API resources (such as "pods" or "secrets"), + // non-resource URL paths (such as "/api"), or neither, but not both. + // If neither is specified, the rule is treated as a default for all URLs. + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + Resources []GroupResources + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + Namespaces []string + + // NonResourceURLs is a set of URL paths that should be audited. + // *s are allowed, but only as the full, final step in the path. + // Examples: + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks + // +optional + NonResourceURLs []string + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. + // An empty list means no restrictions will apply. + // +optional + OmitStages []Stage +} + +// GroupResources represents resource kinds in an API group. +type GroupResources struct { + // Group is the name of the API group that contains the resources. + // The empty string represents the core API group. + // +optional + Group string + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // An empty list implies all resources and subresources in this API groups apply. + // +optional + Resources []string + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + ResourceNames []string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Resource string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // APIGroup is the name of the API group that contains the referred object. + // The empty string represents the core API group. + // +optional + APIGroup string + // APIVersion is the version of the API group that contains the referred object. 
+ // +optional + APIVersion string + // +optional + ResourceVersion string + // +optional + Subresource string +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go new file mode 100644 index 0000000000..d1f180c942 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/audit +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=audit.k8s.io + +package v1 // import "k8s.io/apiserver/pkg/apis/audit/v1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go new file mode 100644 index 0000000000..3d2f444194 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go @@ -0,0 +1,3184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/api/authentication/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_4982ac40a460d730, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_4982ac40a460d730, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo + +func (m *GroupResources) Reset() { *m = GroupResources{} } +func (*GroupResources) ProtoMessage() {} +func (*GroupResources) Descriptor() ([]byte, []int) { + return fileDescriptor_4982ac40a460d730, []int{2} +} +func (m *GroupResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResources.Merge(m, src) +} +func (m *GroupResources) XXX_Size() int { + return m.Size() +} +func (m *GroupResources) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResources.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupResources proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_4982ac40a460d730, []int{3} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_4982ac40a460d730, []int{4} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *PolicyList) Reset() { *m = PolicyList{} } +func (*PolicyList) ProtoMessage() {} +func (*PolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_4982ac40a460d730, []int{5} +} +func (m *PolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyList.Merge(m, src) +} +func (m *PolicyList) XXX_Size() int { + return m.Size() +} +func (m *PolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyList proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_4982ac40a460d730, []int{6} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Event") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Event.AnnotationsEntry") + proto.RegisterType((*EventList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.EventList") + proto.RegisterType((*GroupResources)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.GroupResources") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.ObjectReference") + proto.RegisterType((*Policy)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Policy") + proto.RegisterType((*PolicyList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.PolicyList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.PolicyRule") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto", fileDescriptor_4982ac40a460d730) +} + +var fileDescriptor_4982ac40a460d730 = []byte{ + // 1243 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcd, 0x8e, 0x1b, 0x45, + 0x10, 0xde, 0x59, 0xaf, 0xb3, 0x76, 0x3b, 0xeb, 0x75, 0x3a, 0x11, 0x19, 0xed, 0xc1, 0x36, 0x46, + 0x42, 0x06, 0x96, 0x99, 0xec, 0x12, 0x48, 0x14, 0x09, 0x24, 0x5b, 0x89, 0xc0, 0x22, 0xd9, 0xac, + 0xda, 0x38, 0x07, 0xc4, 0x21, 0xe3, 0x71, 0xc5, 0x1e, 0x6c, 0xcf, 0x4c, 0xba, 0x7b, 0x8c, 0xf6, + 0xc6, 0x0b, 0x20, 0x71, 0xe7, 0x2d, 0xb8, 0x45, 0xbc, 0x40, 0x8e, 0x39, 0xe6, 0x64, 0x11, 
0xc3, + 0x43, 0xa0, 0x9c, 0x50, 0xff, 0xcc, 0x8f, 0xbd, 0x6b, 0xc5, 0xcb, 0x81, 0xdb, 0x74, 0xd5, 0xf7, + 0x7d, 0x55, 0x5d, 0xd3, 0x55, 0xdd, 0xe8, 0xdb, 0xf1, 0x5d, 0x66, 0x79, 0x81, 0x3d, 0x8e, 0xfa, + 0x40, 0x7d, 0xe0, 0xc0, 0xec, 0x19, 0xf8, 0x83, 0x80, 0xda, 0xda, 0xe1, 0x84, 0x1e, 0x03, 0x3a, + 0x03, 0x6a, 0x87, 0xe3, 0xa1, 0x5c, 0xd9, 0x4e, 0x34, 0xf0, 0xb8, 0x3d, 0x3b, 0xb2, 0x87, 0xe0, + 0x03, 0x75, 0x38, 0x0c, 0xac, 0x90, 0x06, 0x3c, 0xc0, 0x0d, 0xc5, 0xb1, 0x12, 0x8e, 0x15, 0x8e, + 0x87, 0x72, 0x65, 0x49, 0x8e, 0x35, 0x3b, 0x3a, 0xf8, 0x74, 0xe8, 0xf1, 0x51, 0xd4, 0xb7, 0xdc, + 0x60, 0x6a, 0x0f, 0x83, 0x61, 0x60, 0x4b, 0x6a, 0x3f, 0x7a, 0x26, 0x57, 0x72, 0x21, 0xbf, 0x94, + 0xe4, 0xc1, 0x61, 0x9a, 0x86, 0xed, 0x44, 0x7c, 0x04, 0x3e, 0xf7, 0x5c, 0x87, 0x7b, 0x81, 0x7f, + 0x41, 0x02, 0x07, 0xb7, 0x53, 0xf4, 0xd4, 0x71, 0x47, 0x9e, 0x0f, 0xf4, 0x2c, 0xcd, 0x7b, 0x0a, + 0xdc, 0xb9, 0x88, 0x65, 0xaf, 0x63, 0xd1, 0xc8, 0xe7, 0xde, 0x14, 0xce, 0x11, 0xbe, 0x78, 0x17, + 0x81, 0xb9, 0x23, 0x98, 0x3a, 0xab, 0xbc, 0xc6, 0xdf, 0x08, 0xe5, 0x1f, 0xcc, 0xc0, 0xe7, 0xf8, + 0x10, 0xe5, 0x27, 0x30, 0x83, 0x89, 0x69, 0xd4, 0x8d, 0x66, 0xb1, 0xfd, 0xde, 0xcb, 0x79, 0x6d, + 0x6b, 0x31, 0xaf, 0xe5, 0x1f, 0x0a, 0xe3, 0xdb, 0xf8, 0x83, 0x28, 0x10, 0x3e, 0x41, 0xbb, 0xb2, + 0x7e, 0x9d, 0xfb, 0xe6, 0xb6, 0xc4, 0xdf, 0xd6, 0xf8, 0xdd, 0x96, 0x32, 0xbf, 0x9d, 0xd7, 0xde, + 0x5f, 0x97, 0x13, 0x3f, 0x0b, 0x81, 0x59, 0xbd, 0xce, 0x7d, 0x12, 0x8b, 0x88, 0xe8, 0x8c, 0x3b, + 0x43, 0x30, 0x73, 0xcb, 0xd1, 0xbb, 0xc2, 0xf8, 0x36, 0xfe, 0x20, 0x0a, 0x84, 0x8f, 0x11, 0xa2, + 0xf0, 0x3c, 0x02, 0xc6, 0x7b, 0xa4, 0x63, 0xee, 0x48, 0x0a, 0xd6, 0x14, 0x44, 0x12, 0x0f, 0xc9, + 0xa0, 0x70, 0x1d, 0xed, 0xcc, 0x80, 0xf6, 0xcd, 0xbc, 0x44, 0x5f, 0xd5, 0xe8, 0x9d, 0x27, 0x40, + 0xfb, 0x44, 0x7a, 0xf0, 0x37, 0x68, 0x27, 0x62, 0x40, 0xcd, 0x2b, 0x75, 0xa3, 0x59, 0x3a, 0xfe, + 0xd0, 0x4a, 0x8f, 0x8e, 0xb5, 0xfc, 0x9f, 0xad, 0xd9, 0x91, 0xd5, 0x63, 0x40, 0x3b, 0xfe, 0xb3, + 0x20, 0x55, 0x12, 0x16, 0x22, 0x15, 0xf0, 0x08, 0x55, 0xbc, 0x69, 0x08, 0x94, 0x05, 0xbe, 0xa8, + 0xb5, 0xf0, 0x98, 0xbb, 0x97, 0x52, 0xbd, 0xb1, 0x98, 0xd7, 0x2a, 0x9d, 0x15, 0x0d, 0x72, 0x4e, + 0x15, 0x7f, 0x82, 0x8a, 0x2c, 0x88, 0xa8, 0x0b, 0x9d, 0x53, 0x66, 0x16, 0xea, 0xb9, 0x66, 0xb1, + 0xbd, 0xb7, 0x98, 0xd7, 0x8a, 0xdd, 0xd8, 0x48, 0x52, 0x3f, 0xb6, 0x51, 0x51, 0xa4, 0xd7, 0x1a, + 0x82, 0xcf, 0xcd, 0x8a, 0xac, 0xc3, 0x35, 0x9d, 0x7d, 0xb1, 0x17, 0x3b, 0x48, 0x8a, 0xc1, 0x4f, + 0x51, 0x31, 0xe8, 0xff, 0x08, 0x2e, 0x27, 0xf0, 0xcc, 0x2c, 0xca, 0x0d, 0x7c, 0x66, 0xbd, 0xbb, + 0xa3, 0xac, 0xc7, 0x31, 0x09, 0x28, 0xf8, 0x2e, 0xa8, 0x94, 0x12, 0x23, 0x49, 0x45, 0xf1, 0x08, + 0x95, 0x29, 0xb0, 0x30, 0xf0, 0x19, 0x74, 0xb9, 0xc3, 0x23, 0x66, 0x22, 0x19, 0xe6, 0x30, 0x13, + 0x26, 0x39, 0x3c, 0x69, 0x24, 0xd1, 0x37, 0x22, 0x90, 0xe2, 0xb4, 0xf1, 0x62, 0x5e, 0x2b, 0x93, + 0x25, 0x1d, 0xb2, 0xa2, 0x8b, 0x1d, 0xb4, 0xa7, 0x4f, 0x83, 0x4a, 0xc4, 0x2c, 0xc9, 0x40, 0xcd, + 0xb5, 0x81, 0x74, 0xe7, 0x58, 0x3d, 0x7f, 0xec, 0x07, 0x3f, 0xf9, 0xed, 0x6b, 0x8b, 0x79, 0x6d, + 0x8f, 0x64, 0x25, 0xc8, 0xb2, 0x22, 0x1e, 0xa4, 0x9b, 0xd1, 0x31, 0xae, 0x5e, 0x32, 0xc6, 0xd2, + 0x46, 0x74, 0x90, 0x15, 0x4d, 0xfc, 0x8b, 0x81, 0x4c, 0x1d, 0x97, 0x80, 0x0b, 0xde, 0x0c, 0x06, + 0xdf, 0x79, 0x53, 0x60, 0xdc, 0x99, 0x86, 0xe6, 0x9e, 0x0c, 0x68, 0x6f, 0x56, 0xbd, 0x47, 0x9e, + 0x4b, 0x03, 0xc1, 0x6d, 0xd7, 0xf5, 0x31, 0x30, 0xc9, 0x1a, 0x61, 0xb2, 0x36, 0x24, 0x0e, 0x50, + 0x59, 0x76, 0x65, 0x9a, 0x44, 0xf9, 0xbf, 0x25, 0x11, 0x37, 0x7d, 0xb9, 0xbb, 0x24, 0x47, 0x56, + 0xe4, 0xf1, 0x73, 
0x54, 0x72, 0x7c, 0x3f, 0xe0, 0xb2, 0x6b, 0x98, 0xb9, 0x5f, 0xcf, 0x35, 0x4b, + 0xc7, 0xf7, 0x36, 0x39, 0x97, 0x72, 0xd2, 0x59, 0xad, 0x94, 0xfc, 0xc0, 0xe7, 0xf4, 0xac, 0x7d, + 0x5d, 0x07, 0x2e, 0x65, 0x3c, 0x24, 0x1b, 0xe3, 0xe0, 0x2b, 0x54, 0x59, 0x65, 0xe1, 0x0a, 0xca, + 0x8d, 0xe1, 0x4c, 0x8d, 0x4b, 0x22, 0x3e, 0xf1, 0x0d, 0x94, 0x9f, 0x39, 0x93, 0x08, 0xd4, 0x48, + 0x24, 0x6a, 0x71, 0x6f, 0xfb, 0xae, 0xd1, 0x78, 0x61, 0xa0, 0xa2, 0x0c, 0xfe, 0xd0, 0x63, 0x1c, + 0xff, 0x80, 0x0a, 0x62, 0xf7, 0x03, 0x87, 0x3b, 0x92, 0x5e, 0x3a, 0xb6, 0x36, 0xab, 0x95, 0x60, + 0x3f, 0x02, 0xee, 0xb4, 0x2b, 0x3a, 0xe3, 0x42, 0x6c, 0x21, 0x89, 0x22, 0x3e, 0x41, 0x79, 0x8f, + 0xc3, 0x94, 0x99, 0xdb, 0xb2, 0x30, 0x1f, 0x6d, 0x5c, 0x98, 0xf6, 0x5e, 0x3c, 0x75, 0x3b, 0x82, + 0x4f, 0x94, 0x4c, 0xe3, 0x37, 0x03, 0x95, 0xbf, 0xa6, 0x41, 0x14, 0x12, 0x50, 0xa3, 0x84, 0xe1, + 0x0f, 0x50, 0x7e, 0x28, 0x2c, 0xfa, 0xae, 0x48, 0x78, 0x0a, 0xa6, 0x7c, 0x62, 0x34, 0xd1, 0x98, + 0x21, 0x73, 0xd1, 0xa3, 0x29, 0x91, 0x21, 0xa9, 0x1f, 0xdf, 0x11, 0xdd, 0xa9, 0x16, 0x27, 0xce, + 0x14, 0x98, 0x99, 0x93, 0x04, 0xdd, 0x73, 0x19, 0x07, 0x59, 0xc6, 0x35, 0x7e, 0xcf, 0xa1, 0xfd, + 0x95, 0x71, 0x83, 0x0f, 0x51, 0x21, 0x06, 0xe9, 0x0c, 0x93, 0x7a, 0xc5, 0x5a, 0x24, 0x41, 0x88, + 0xa9, 0xe8, 0x0b, 0xa9, 0xd0, 0x71, 0xf5, 0x9f, 0x4b, 0xa7, 0xe2, 0x49, 0xec, 0x20, 0x29, 0x46, + 0xdc, 0x24, 0x62, 0xa1, 0xaf, 0xaa, 0x64, 0xfe, 0x0b, 0x2c, 0x91, 0x1e, 0xdc, 0x46, 0xb9, 0xc8, + 0x1b, 0xe8, 0x8b, 0xe9, 0x96, 0x06, 0xe4, 0x7a, 0x9b, 0xde, 0x8a, 0x82, 0x2c, 0x36, 0xe1, 0x84, + 0x9e, 0xac, 0xa8, 0xbe, 0xb3, 0x92, 0x4d, 0xb4, 0x4e, 0x3b, 0xaa, 0xd2, 0x09, 0x42, 0xdc, 0x88, + 0x4e, 0xe8, 0x3d, 0x01, 0xca, 0xbc, 0xc0, 0x97, 0x37, 0x58, 0xe6, 0x46, 0x6c, 0x9d, 0x76, 0xb4, + 0x87, 0x64, 0x50, 0xb8, 0x85, 0xf6, 0xe3, 0x22, 0xc4, 0xc4, 0x5d, 0x49, 0xbc, 0xa9, 0x89, 0xfb, + 0x64, 0xd9, 0x4d, 0x56, 0xf1, 0xf8, 0x73, 0x54, 0x62, 0x51, 0x3f, 0x29, 0x76, 0x41, 0xd2, 0x93, + 0x76, 0xea, 0xa6, 0x2e, 0x92, 0xc5, 0x35, 0xfe, 0x31, 0xd0, 0x95, 0xd3, 0x60, 0xe2, 0xb9, 0x67, + 0xf8, 0xe9, 0xb9, 0x5e, 0xb8, 0xb5, 0x59, 0x2f, 0xa8, 0x9f, 0x2e, 0xbb, 0x21, 0xd9, 0x68, 0x6a, + 0xcb, 0xf4, 0x43, 0x17, 0xe5, 0x69, 0x34, 0x81, 0xb8, 0x1f, 0xac, 0x4d, 0xfa, 0x41, 0x25, 0x47, + 0xa2, 0x09, 0xa4, 0x87, 0x5b, 0xac, 0x18, 0x51, 0x5a, 0xf8, 0x0e, 0x42, 0xc1, 0xd4, 0xe3, 0x72, + 0x52, 0xc5, 0x87, 0xf5, 0xa6, 0x4c, 0x21, 0xb1, 0xa6, 0xaf, 0x96, 0x0c, 0xb4, 0xf1, 0x87, 0x81, + 0x90, 0x52, 0xff, 0x1f, 0x46, 0xc1, 0xe3, 0xe5, 0x51, 0xf0, 0xf1, 0xe6, 0x5b, 0x5f, 0x33, 0x0b, + 0x5e, 0xe4, 0xe2, 0xec, 0x45, 0x35, 0x2e, 0xf9, 0x66, 0xac, 0xa1, 0xbc, 0x78, 0x5a, 0xc4, 0xc3, + 0xa0, 0x28, 0x90, 0xe2, 0xd9, 0xc1, 0x88, 0xb2, 0x63, 0x0b, 0x21, 0xf1, 0x21, 0x4f, 0x74, 0x5c, + 0xd4, 0xb2, 0x28, 0x6a, 0x2f, 0xb1, 0x92, 0x0c, 0x42, 0x08, 0x8a, 0x87, 0x1b, 0x33, 0x77, 0x52, + 0x41, 0xf1, 0x9e, 0x63, 0x44, 0xd9, 0xb1, 0x9b, 0x1d, 0x41, 0x79, 0x59, 0x83, 0xe3, 0x4d, 0x6a, + 0xb0, 0x3c, 0xee, 0xd2, 0x71, 0x70, 0xe1, 0xe8, 0xb2, 0x10, 0x4a, 0x66, 0x03, 0x33, 0xaf, 0xa4, + 0x59, 0x27, 0xc3, 0x83, 0x91, 0x0c, 0x02, 0x7f, 0x89, 0xf6, 0xfd, 0xc0, 0x8f, 0xa5, 0x7a, 0xe4, + 0x21, 0x33, 0x77, 0x25, 0xe9, 0xba, 0x68, 0xb9, 0x93, 0x65, 0x17, 0x59, 0xc5, 0xae, 0x9c, 0xbc, + 0xc2, 0xc6, 0x27, 0xaf, 0xdd, 0x7c, 0xf9, 0xa6, 0xba, 0xf5, 0xea, 0x4d, 0x75, 0xeb, 0xf5, 0x9b, + 0xea, 0xd6, 0xcf, 0x8b, 0xaa, 0xf1, 0x72, 0x51, 0x35, 0x5e, 0x2d, 0xaa, 0xc6, 0xeb, 0x45, 0xd5, + 0xf8, 0x73, 0x51, 0x35, 0x7e, 0xfd, 0xab, 0xba, 0xf5, 0xfd, 0xf6, 0xec, 0xe8, 0xdf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x33, 0x6e, 0xcf, 0xc7, 
0x82, 0x0d, 0x00, 0x00, +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UserAgent) + copy(dAtA[i:], m.UserAgent) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserAgent))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a + } + } + { + size, err := m.StageTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + { + size, err := m.RequestReceivedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + if m.ResponseObject != nil { + { + size, err := m.ResponseObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.RequestObject != nil { + { + size, err := m.RequestObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ResponseStatus != nil { + { + size, err := m.ResponseStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.ObjectRef != nil { + { + size, err := m.ObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.SourceIPs) > 0 { + for iNdEx := len(m.SourceIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceIPs[iNdEx]) + copy(dAtA[i:], m.SourceIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourceIPs[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.ImpersonatedUser != nil { + { + size, err := m.ImpersonatedUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x2a + i -= len(m.RequestURI) + 
copy(dAtA[i:], m.RequestURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestURI))) + i-- + dAtA[i] = 0x22 + i -= len(m.Stage) + copy(dAtA[i:], m.Stage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stage))) + i-- + dAtA[i] = 0x1a + i -= len(m.AuditID) + copy(dAtA[i:], m.AuditID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuditID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x42 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x3a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], 
m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.UserGroups) > 0 { + for iNdEx := len(m.UserGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UserGroups[iNdEx]) + copy(dAtA[i:], m.UserGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuditID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Stage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RequestURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ImpersonatedUser != nil { + l = m.ImpersonatedUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.SourceIPs) > 0 { + for _, s := range m.SourceIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ObjectRef != nil { + l = m.ObjectRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseStatus != nil { + l = m.ResponseStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestObject != nil { + l = m.RequestObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseObject != nil { + l = m.ResponseObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.RequestReceivedTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.StageTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UserAgent) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.UserGroups) > 0 { + for _, s := range m.UserGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = 
append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&Event{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `AuditID:` + fmt.Sprintf("%v", this.AuditID) + `,`, + `Stage:` + fmt.Sprintf("%v", this.Stage) + `,`, + `RequestURI:` + fmt.Sprintf("%v", this.RequestURI) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "UserInfo", "v1.UserInfo", 1), `&`, ``, 1) + `,`, + `ImpersonatedUser:` + strings.Replace(fmt.Sprintf("%v", this.ImpersonatedUser), "UserInfo", "v1.UserInfo", 1) + `,`, + `SourceIPs:` + fmt.Sprintf("%v", this.SourceIPs) + `,`, + `ObjectRef:` + strings.Replace(this.ObjectRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ResponseStatus:` + strings.Replace(fmt.Sprintf("%v", this.ResponseStatus), "Status", "v11.Status", 1) + `,`, + `RequestObject:` + strings.Replace(fmt.Sprintf("%v", this.RequestObject), "Unknown", "runtime.Unknown", 1) + `,`, + `ResponseObject:` + strings.Replace(fmt.Sprintf("%v", this.ResponseObject), "Unknown", "runtime.Unknown", 1) + `,`, + `RequestReceivedTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequestReceivedTimestamp), "MicroTime", "v11.MicroTime", 1), `&`, ``, 1) + `,`, + `StageTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StageTimestamp), "MicroTime", "v11.MicroTime", 1), `&`, ``, 1) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `UserAgent:` + fmt.Sprintf("%v", this.UserAgent) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *GroupResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupResources{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), 
"PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Policy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Policy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Policy", "Policy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]GroupResources{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "GroupResources", "GroupResources", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&PolicyRule{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `UserGroups:` + fmt.Sprintf("%v", this.UserGroups) + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stage = Stage(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImpersonatedUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImpersonatedUser == nil { + m.ImpersonatedUser = &v1.UserInfo{} + } + if err := m.ImpersonatedUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceIPs = append(m.SourceIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectRef == nil { + m.ObjectRef = &ObjectReference{} + } + if err := m.ObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseStatus == nil { + m.ResponseStatus = &v11.Status{} + } + if err := m.ResponseStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestObject == nil { + m.RequestObject = &runtime.Unknown{} + } + if err := m.RequestObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ResponseObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseObject == nil { + m.ResponseObject = &runtime.Unknown{} + } + if err := m.ResponseObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestReceivedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RequestReceivedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StageTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StageTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserAgent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserAgent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 
{ + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Policy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserGroups = append(m.UserGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, GroupResources{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto new file mode 100644 index 0000000000..d142ee7e49 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -0,0 +1,249 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apiserver.pkg.apis.audit.v1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Event captures all the information that can be included in an API audit log. +message Event { + // AuditLevel at which event was generated + optional string level = 1; + + // Unique audit ID, generated for each request. + optional string auditID = 2; + + // Stage of the request handling when this event instance was generated. + optional string stage = 3; + + // RequestURI is the request URI as sent by the client to a server. + optional string requestURI = 4; + + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. + optional string verb = 5; + + // Authenticated user information. + optional k8s.io.api.authentication.v1.UserInfo user = 6; + + // Impersonated user information. + // +optional + optional k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7; + + // Source IPs, from where the request originated and intermediate proxies. + // +optional + repeated string sourceIPs = 8; + + // UserAgent records the user agent string reported by the client. + // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + optional string userAgent = 16; + + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. + // +optional + optional ObjectReference objectRef = 9; + + // The response status, populated even when the ResponseObject is not a Status type. + // For successful responses, this will only include the Code and StatusSuccess. + // For non-status type error responses, this will be auto-populated with the error Message. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10; + + // API object from the request, in JSON format. 
The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. + // +optional + optional k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11; + + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12; + + // Time the request reached the apiserver. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13; + + // Time the request reached current audit stage. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14; + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + map<string, string> annotations = 15; +} + +// EventList is a list of audit Events. +message EventList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Event items = 2; +} + +// GroupResources represents resource kinds in an API group. +message GroupResources { + // Group is the name of the API group that contains the resources. + // The empty string represents the core API group. + // +optional + optional string group = 1; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // An empty list implies all resources and subresources in this API groups apply. + // +optional + repeated string resources = 2; + + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + repeated string resourceNames = 3; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +message ObjectReference { + // +optional + optional string resource = 1; + + // +optional + optional string namespace = 2; + + // +optional + optional string name = 3; + + // +optional + optional string uid = 4; + + // APIGroup is the name of the API group that contains the referred object. + // The empty string represents the core API group. + // +optional + optional string apiGroup = 5; + + // APIVersion is the version of the API group that contains the referred object. 
+ // +optional + optional string apiVersion = 6; + + // +optional + optional string resourceVersion = 7; + + // +optional + optional string subresource = 8; +} + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +message Policy { + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules specify the audit Level a request should be recorded at. + // A request may match multiple rules, in which case the FIRST matching rule is used. + // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + repeated PolicyRule rules = 2; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + repeated string omitStages = 3; +} + +// PolicyList is a list of audit Policies. +message PolicyList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Policy items = 2; +} + +// PolicyRule maps requests based off metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). +message PolicyRule { + // The Level that requests matching this rule are recorded at. + optional string level = 1; + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + repeated string users = 2; + + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + repeated string userGroups = 3; + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + repeated string verbs = 4; + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + repeated GroupResources resources = 5; + + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + repeated string namespaces = 6; + + // NonResourceURLs is a set of URL paths that should be audited. + // *s are allowed, but only as the full, final step in the path. + // Examples: + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks + // +optional + repeated string nonResourceURLs = 7; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. + // An empty list means no restrictions will apply. + // +optional + repeated string omitStages = 8; +} + diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go new file mode 100644 index 0000000000..46e3e47bc6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "audit.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + &Policy{}, + &PolicyList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go new file mode 100644 index 0000000000..cf6bb1a019 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go @@ -0,0 +1,280 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authnv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// Header keys used by the audit system. +const ( + // Header to hold the audit ID as the request is propagated through the serving hierarchy. The + // Audit-ID header should be set by the first server to receive the request (e.g. the federation + // server or kube-aggregator). + HeaderAuditID = "Audit-ID" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). 
+ LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling that audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated as soon as the audit handler receives the request, and before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated once the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated once the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event captures all the information that can be included in an API audit log. +type Event struct { + metav1.TypeMeta `json:",inline"` + + // AuditLevel at which event was generated + Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"` + + // Unique audit ID, generated for each request. + AuditID types.UID `json:"auditID" protobuf:"bytes,2,opt,name=auditID,casttype=k8s.io/apimachinery/pkg/types.UID"` + // Stage of the request handling when this event instance was generated. + Stage Stage `json:"stage" protobuf:"bytes,3,opt,name=stage,casttype=Stage"` + + // RequestURI is the request URI as sent by the client to a server. + RequestURI string `json:"requestURI" protobuf:"bytes,4,opt,name=requestURI"` + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. + Verb string `json:"verb" protobuf:"bytes,5,opt,name=verb"` + // Authenticated user information. + User authnv1.UserInfo `json:"user" protobuf:"bytes,6,opt,name=user"` + // Impersonated user information. + // +optional + ImpersonatedUser *authnv1.UserInfo `json:"impersonatedUser,omitempty" protobuf:"bytes,7,opt,name=impersonatedUser"` + // Source IPs, from where the request originated and intermediate proxies. + // +optional + SourceIPs []string `json:"sourceIPs,omitempty" protobuf:"bytes,8,rep,name=sourceIPs"` + // UserAgent records the user agent string reported by the client. + // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + UserAgent string `json:"userAgent,omitempty" protobuf:"bytes,16,opt,name=userAgent"` + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. + // +optional + ObjectRef *ObjectReference `json:"objectRef,omitempty" protobuf:"bytes,9,opt,name=objectRef"` + // The response status, populated even when the ResponseObject is not a Status type. + // For successful responses, this will only include the Code and StatusSuccess. + // For non-status type error responses, this will be auto-populated with the error Message. + // +optional + ResponseStatus *metav1.Status `json:"responseStatus,omitempty" protobuf:"bytes,10,opt,name=responseStatus"` + + // API object from the request, in JSON format. 
The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. + // +optional + RequestObject *runtime.Unknown `json:"requestObject,omitempty" protobuf:"bytes,11,opt,name=requestObject"` + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + ResponseObject *runtime.Unknown `json:"responseObject,omitempty" protobuf:"bytes,12,opt,name=responseObject"` + // Time the request reached the apiserver. + // +optional + RequestReceivedTimestamp metav1.MicroTime `json:"requestReceivedTimestamp" protobuf:"bytes,13,opt,name=requestReceivedTimestamp"` + // Time the request reached current audit stage. + // +optional + StageTimestamp metav1.MicroTime `json:"stageTimestamp" protobuf:"bytes,14,opt,name=stageTimestamp"` + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,15,rep,name=annotations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of audit Events. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +type Policy struct { + metav1.TypeMeta `json:",inline"` + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules specify the audit Level a request should be recorded at. + // A request may match multiple rules, in which case the FIRST matching rule is used. + // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PolicyList is a list of audit Policies. 
+type PolicyList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []Policy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PolicyRule maps requests based off metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). +type PolicyRule struct { + // The Level that requests matching this rule are recorded at. + Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"` + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + Users []string `json:"users,omitempty" protobuf:"bytes,2,rep,name=users"` + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + UserGroups []string `json:"userGroups,omitempty" protobuf:"bytes,3,rep,name=userGroups"` + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + Verbs []string `json:"verbs,omitempty" protobuf:"bytes,4,rep,name=verbs"` + + // Rules can apply to API resources (such as "pods" or "secrets"), + // non-resource URL paths (such as "/api"), or neither, but not both. + // If neither is specified, the rule is treated as a default for all URLs. + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + Resources []GroupResources `json:"resources,omitempty" protobuf:"bytes,5,rep,name=resources"` + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"` + + // NonResourceURLs is a set of URL paths that should be audited. + // *s are allowed, but only as the full, final step in the path. + // Examples: + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. + // An empty list means no restrictions will apply. + // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` +} + +// GroupResources represents resource kinds in an API group. +type GroupResources struct { + // Group is the name of the API group that contains the resources. + // The empty string represents the core API group. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // An empty list implies all resources and subresources in this API groups apply. 
+ // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,3,rep,name=resourceNames"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,1,opt,name=resource"` + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // APIGroup is the name of the API group that contains the referred object. + // The empty string represents the core API group. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,5,opt,name=apiGroup"` + // APIVersion is the version of the API group that contains the referred object. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,6,opt,name=apiVersion"` + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,7,opt,name=resourceVersion"` + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,8,opt,name=subresource"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..4500bfe314 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go @@ -0,0 +1,322 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + audit "k8s.io/apiserver/pkg/apis/audit" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Event)(nil), (*audit.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Event_To_audit_Event(a.(*Event), b.(*audit.Event), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.Event)(nil), (*Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Event_To_v1_Event(a.(*audit.Event), b.(*Event), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EventList)(nil), (*audit.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EventList_To_audit_EventList(a.(*EventList), b.(*audit.EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.EventList)(nil), (*EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_EventList_To_v1_EventList(a.(*audit.EventList), b.(*EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*GroupResources)(nil), (*audit.GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GroupResources_To_audit_GroupResources(a.(*GroupResources), b.(*audit.GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.GroupResources)(nil), (*GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_GroupResources_To_v1_GroupResources(a.(*audit.GroupResources), b.(*GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ObjectReference)(nil), (*audit.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ObjectReference_To_audit_ObjectReference(a.(*ObjectReference), b.(*audit.ObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.ObjectReference)(nil), (*ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_ObjectReference_To_v1_ObjectReference(a.(*audit.ObjectReference), b.(*ObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Policy)(nil), (*audit.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Policy_To_audit_Policy(a.(*Policy), b.(*audit.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.Policy)(nil), (*Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Policy_To_v1_Policy(a.(*audit.Policy), b.(*Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyList)(nil), (*audit.PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PolicyList_To_audit_PolicyList(a.(*PolicyList), b.(*audit.PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyList)(nil), (*PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyList_To_v1_PolicyList(a.(*audit.PolicyList), b.(*PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyRule)(nil), (*audit.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_PolicyRule_To_audit_PolicyRule(a.(*PolicyRule), b.(*audit.PolicyRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyRule)(nil), (*PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyRule_To_v1_PolicyRule(a.(*audit.PolicyRule), b.(*PolicyRule), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + out.Level = audit.Level(in.Level) + out.AuditID = types.UID(in.AuditID) + out.Stage = audit.Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + out.ObjectRef = (*audit.ObjectReference)(unsafe.Pointer(in.ObjectRef)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_v1_Event_To_audit_Event is an autogenerated conversion function. +func Convert_v1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_audit_Event(in, out, s) +} + +func autoConvert_audit_Event_To_v1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + out.Level = Level(in.Level) + out.AuditID = types.UID(in.AuditID) + out.Stage = Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + out.ObjectRef = (*ObjectReference)(unsafe.Pointer(in.ObjectRef)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_audit_Event_To_v1_Event is an autogenerated conversion function. +func Convert_audit_Event_To_v1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + return autoConvert_audit_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]audit.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EventList_To_audit_EventList is an autogenerated conversion function. 
+func Convert_v1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_audit_EventList(in, out, s) +} + +func autoConvert_audit_EventList_To_v1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_audit_EventList_To_v1_EventList is an autogenerated conversion function. +func Convert_audit_EventList_To_v1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + return autoConvert_audit_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_v1_GroupResources_To_audit_GroupResources is an autogenerated conversion function. +func Convert_v1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + return autoConvert_v1_GroupResources_To_audit_GroupResources(in, out, s) +} + +func autoConvert_audit_GroupResources_To_v1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_audit_GroupResources_To_v1_GroupResources is an autogenerated conversion function. +func Convert_audit_GroupResources_To_v1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + return autoConvert_audit_GroupResources_To_v1_GroupResources(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIGroup = in.APIGroup + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +// Convert_v1_ObjectReference_To_audit_ObjectReference is an autogenerated conversion function. +func Convert_v1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_audit_ObjectReference(in, out, s) +} + +func autoConvert_audit_ObjectReference_To_v1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIGroup = in.APIGroup + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +// Convert_audit_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. 
+func Convert_audit_ObjectReference_To_v1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + return autoConvert_audit_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_v1_Policy_To_audit_Policy is an autogenerated conversion function. +func Convert_v1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + return autoConvert_v1_Policy_To_audit_Policy(in, out, s) +} + +func autoConvert_audit_Policy_To_v1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_audit_Policy_To_v1_Policy is an autogenerated conversion function. +func Convert_audit_Policy_To_v1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + return autoConvert_audit_Policy_To_v1_Policy(in, out, s) +} + +func autoConvert_v1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]audit.Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PolicyList_To_audit_PolicyList is an autogenerated conversion function. +func Convert_v1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + return autoConvert_v1_PolicyList_To_audit_PolicyList(in, out, s) +} + +func autoConvert_audit_PolicyList_To_v1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_audit_PolicyList_To_v1_PolicyList is an autogenerated conversion function. +func Convert_audit_PolicyList_To_v1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + return autoConvert_audit_PolicyList_To_v1_PolicyList(in, out, s) +} + +func autoConvert_v1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + out.Level = audit.Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]audit.GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_v1_PolicyRule_To_audit_PolicyRule is an autogenerated conversion function. 
+func Convert_v1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + return autoConvert_v1_PolicyRule_To_audit_PolicyRule(in, out, s) +} + +func autoConvert_audit_PolicyRule_To_v1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Level = Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_audit_PolicyRule_To_v1_PolicyRule is an autogenerated conversion function. +func Convert_audit_PolicyRule_To_v1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_audit_PolicyRule_To_v1_PolicyRule(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..81d126d4e0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go @@ -0,0 +1,291 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.User.DeepCopyInto(&out.User) + if in.ImpersonatedUser != nil { + in, out := &in.ImpersonatedUser, &out.ImpersonatedUser + *out = new(authenticationv1.UserInfo) + (*in).DeepCopyInto(*out) + } + if in.SourceIPs != nil { + in, out := &in.SourceIPs, &out.SourceIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(ObjectReference) + **out = **in + } + if in.ResponseStatus != nil { + in, out := &in.ResponseStatus, &out.ResponseStatus + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.RequestObject != nil { + in, out := &in.RequestObject, &out.RequestObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + if in.ResponseObject != nil { + in, out := &in.ResponseObject, &out.ResponseObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResources) DeepCopyInto(out *GroupResources) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources. +func (in *GroupResources) DeepCopy() *GroupResources { + if in == nil { + return nil + } + out := new(GroupResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]GroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. 
+func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..cce2e603a6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go new file mode 100644 index 0000000000..78e5eab09d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go @@ -0,0 +1,78 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apiserver/pkg/apis/audit" +) + +func Convert_audit_ObjectReference_To_v1alpha1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + // Begin by copying all fields + if err := autoConvert_audit_ObjectReference_To_v1alpha1_ObjectReference(in, out, s); err != nil { + return err + } + // empty string means the core api group + if in.APIGroup == "" { + out.APIVersion = in.APIVersion + } else { + out.APIVersion = in.APIGroup + "/" + in.APIVersion + } + return nil +} + +func Convert_v1alpha1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + // Begin by copying all fields + if err := autoConvert_v1alpha1_ObjectReference_To_audit_ObjectReference(in, out, s); err != nil { + return err + } + i := strings.LastIndex(in.APIVersion, "/") + if i == -1 { + // In fact it should always contain a "/" + out.APIVersion = in.APIVersion + } else { + out.APIGroup = in.APIVersion[:i] + out.APIVersion = in.APIVersion[i+1:] + } + return nil +} + +func Convert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + if err := autoConvert_v1alpha1_Event_To_audit_Event(in, out, s); err != nil { + return err + } + if out.StageTimestamp.IsZero() { + out.StageTimestamp = metav1.NewMicroTime(in.CreationTimestamp.Time) + } + if out.RequestReceivedTimestamp.IsZero() { + out.RequestReceivedTimestamp = metav1.NewMicroTime(in.Timestamp.Time) + } + return nil +} + +func Convert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + if err := autoConvert_audit_Event_To_v1alpha1_Event(in, out, s); err != nil { + return err + } + out.CreationTimestamp = metav1.NewTime(in.StageTimestamp.Time) + out.Timestamp = metav1.NewTime(in.RequestReceivedTimestamp.Time) + return nil + +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go new file mode 100644 index 0000000000..54a8440eb7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/audit +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=audit.k8s.io + +package v1alpha1 // import "k8s.io/apiserver/pkg/apis/audit/v1alpha1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..0b381d4242 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go @@ -0,0 +1,3241 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/authentication/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_46c0b2c8ea67b187, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_46c0b2c8ea67b187, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo + +func (m *GroupResources) Reset() { *m = GroupResources{} } +func (*GroupResources) ProtoMessage() {} +func (*GroupResources) Descriptor() ([]byte, []int) { + return fileDescriptor_46c0b2c8ea67b187, []int{2} +} +func (m *GroupResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResources.Merge(m, src) +} +func (m *GroupResources) XXX_Size() int { + return m.Size() +} +func (m *GroupResources) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResources.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupResources proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_46c0b2c8ea67b187, []int{3} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_46c0b2c8ea67b187, []int{4} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *PolicyList) Reset() { *m = PolicyList{} } +func (*PolicyList) ProtoMessage() {} +func (*PolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_46c0b2c8ea67b187, []int{5} +} +func (m *PolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyList.Merge(m, src) +} +func (m *PolicyList) XXX_Size() int { + return m.Size() +} +func (m *PolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyList proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_46c0b2c8ea67b187, []int{6} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule 
proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.Event") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.Event.AnnotationsEntry") + proto.RegisterType((*EventList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.EventList") + proto.RegisterType((*GroupResources)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.GroupResources") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.ObjectReference") + proto.RegisterType((*Policy)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.Policy") + proto.RegisterType((*PolicyList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.PolicyList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.apiserver.pkg.apis.audit.v1alpha1.PolicyRule") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto", fileDescriptor_46c0b2c8ea67b187) +} + +var fileDescriptor_46c0b2c8ea67b187 = []byte{ + // 1263 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xd6, 0x71, 0x63, 0x8f, 0x1b, 0xc7, 0x99, 0x56, 0x74, 0x95, 0x83, 0x6d, 0x8c, 0x84, + 0x2c, 0x08, 0xbb, 0x49, 0x14, 0x68, 0x40, 0x02, 0x11, 0xab, 0x15, 0x58, 0x4a, 0x43, 0x78, 0x89, + 0x2b, 0xf1, 0xe7, 0xc0, 0xda, 0x7e, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 0xce, 0xac, 0xab, 0xdc, 0x38, + 0x70, 0x45, 0xe2, 0xce, 0x87, 0xe0, 0x23, 0x54, 0xdc, 0x72, 0xec, 0xb1, 0x27, 0x8b, 0x98, 0x6f, + 0x91, 0x03, 0x42, 0x33, 0xfb, 0x67, 0xd6, 0x4e, 0x2d, 0x1c, 0x0e, 0xbd, 0xed, 0xbc, 0xf7, 0x7b, + 0xbf, 0xf7, 0xe6, 0xed, 0xfb, 0x33, 0xe4, 0xeb, 0xc1, 0x01, 0x33, 0x6c, 0xd7, 0x1c, 0x04, 0x6d, + 0xf4, 0x1d, 0xe4, 0xc8, 0xcc, 0x31, 0x3a, 0x5d, 0xd7, 0x37, 0x23, 0x85, 0xe5, 0xd9, 0x0c, 0xfd, + 0x31, 0xfa, 0xa6, 0x37, 0xe8, 0xc9, 0x93, 0x69, 0x05, 0x5d, 0x9b, 0x9b, 0xe3, 0x5d, 0x6b, 0xe8, + 0xf5, 0xad, 0x5d, 0xb3, 0x87, 0x0e, 0xfa, 0x16, 0xc7, 0xae, 0xe1, 0xf9, 0x2e, 0x77, 0x69, 0x3d, + 0xb4, 0x34, 0x12, 0x4b, 0xc3, 0x1b, 0xf4, 0xe4, 0xc9, 0x90, 0x96, 0x46, 0x6c, 0xb9, 0xf5, 0x41, + 0xcf, 0xe6, 0xfd, 0xa0, 0x6d, 0x74, 0xdc, 0x91, 0xd9, 0x73, 0x7b, 0xae, 0x29, 0x09, 0xda, 0xc1, + 0xb9, 0x3c, 0xc9, 0x83, 0xfc, 0x0a, 0x89, 0xb7, 0xb6, 0x55, 0x48, 0xa6, 0x15, 0xf0, 0x3e, 0x3a, + 0xdc, 0xee, 0x58, 0xdc, 0x76, 0x1d, 0x73, 0x7c, 0x23, 0x8c, 0xad, 0x7d, 0x85, 0x1e, 0x59, 0x9d, + 0xbe, 0xed, 0xa0, 0x7f, 0xa1, 0xee, 0x30, 0x42, 0x6e, 0xbd, 0xce, 0xca, 0x5c, 0x64, 0xe5, 0x07, + 0x0e, 0xb7, 0x47, 0x78, 0xc3, 0xe0, 0xa3, 0xff, 0x32, 0x60, 0x9d, 0x3e, 0x8e, 0xac, 0x79, 0xbb, + 0xda, 0x1f, 0xf7, 0x48, 0xf6, 0xc9, 0x18, 0x1d, 0x4e, 0x7f, 0x20, 0x39, 0x11, 0x4d, 0xd7, 0xe2, + 0x96, 0xae, 0x55, 0xb5, 0x7a, 0x61, 0x6f, 0xc7, 0x50, 0x29, 0x4c, 0x48, 0x55, 0x16, 0x05, 0xda, + 0x18, 0xef, 0x1a, 0x5f, 0xb5, 0x7f, 0xc4, 0x0e, 0x7f, 0x8a, 0xdc, 0x6a, 0xd0, 0xcb, 0x49, 0x65, + 0x65, 0x3a, 0xa9, 0x10, 0x25, 0x83, 0x84, 0x95, 0x6e, 0x93, 0xec, 0x10, 0xc7, 0x38, 0xd4, 0xef, + 0x54, 0xb5, 0x7a, 0xbe, 0xf1, 0x56, 0x04, 0xce, 0x1e, 0x09, 0xe1, 0x75, 0xfc, 0x01, 0x21, 0x88, + 0x7e, 0x47, 0xf2, 0x22, 0x70, 0xc6, 0xad, 0x91, 0xa7, 0x67, 0x64, 0x40, 0xef, 0x2d, 0x17, 0xd0, + 0x99, 0x3d, 0xc2, 0xc6, 0x66, 0xc4, 0x9e, 0x3f, 0x8b, 0x49, 0x40, 0xf1, 0xd1, 0x63, 0xb2, 0x26, + 0x8b, 0xa0, 0xf9, 0x58, 0x5f, 0x95, 0xc1, 0xec, 0x47, 0xf0, 0xb5, 0xc3, 0x50, 0x7c, 0x3d, 0xa9, + 0xbc, 0xbd, 0x28, 0xa5, 0xfc, 0xc2, 0x43, 0x66, 0xb4, 0x9a, 0x8f, 0x21, 0x26, 0x11, 0x57, 0x63, + 0xdc, 0xea, 
0xa1, 0x9e, 0x9d, 0xbd, 0xda, 0xa9, 0x10, 0x5e, 0xc7, 0x1f, 0x10, 0x82, 0xe8, 0x1e, + 0x21, 0x3e, 0xfe, 0x14, 0x20, 0xe3, 0x2d, 0x68, 0xea, 0x77, 0xa5, 0x49, 0x92, 0x3a, 0x48, 0x34, + 0x90, 0x42, 0xd1, 0x2a, 0x59, 0x1d, 0xa3, 0xdf, 0xd6, 0xd7, 0x24, 0xfa, 0x5e, 0x84, 0x5e, 0x7d, + 0x86, 0x7e, 0x1b, 0xa4, 0x86, 0x7e, 0x49, 0x56, 0x03, 0x86, 0xbe, 0x9e, 0x93, 0xb9, 0x7a, 0x37, + 0x95, 0x2b, 0x63, 0xb6, 0x4c, 0x45, 0x8e, 0x5a, 0x0c, 0xfd, 0xa6, 0x73, 0xee, 0x2a, 0x26, 0x21, + 0x01, 0xc9, 0x40, 0xfb, 0xa4, 0x64, 0x8f, 0x3c, 0xf4, 0x99, 0xeb, 0x88, 0x52, 0x11, 0x1a, 0x3d, + 0x7f, 0x2b, 0xd6, 0x07, 0xd3, 0x49, 0xa5, 0xd4, 0x9c, 0xe3, 0x80, 0x1b, 0xac, 0xf4, 0x7d, 0x92, + 0x67, 0x6e, 0xe0, 0x77, 0xb0, 0x79, 0xc2, 0x74, 0x52, 0xcd, 0xd4, 0xf3, 0x8d, 0x75, 0xf1, 0xd3, + 0x4e, 0x63, 0x21, 0x28, 0x3d, 0x35, 0x49, 0x5e, 0x84, 0x77, 0xd8, 0x43, 0x87, 0xeb, 0x54, 0xe6, + 0x21, 0xf9, 0xcb, 0xad, 0x58, 0x01, 0x0a, 0x43, 0xcf, 0x49, 0xde, 0x95, 0x85, 0x08, 0x78, 0xae, + 0x17, 0xe4, 0x05, 0x3e, 0x36, 0x96, 0x1d, 0x0b, 0x51, 0x5d, 0x03, 0x9e, 0xa3, 0x8f, 0x4e, 0x07, + 0xc3, 0xc0, 0x12, 0x21, 0x28, 0x6a, 0xda, 0x27, 0x45, 0x1f, 0x99, 0xe7, 0x3a, 0x0c, 0x4f, 0xb9, + 0xc5, 0x03, 0xa6, 0xdf, 0x93, 0xce, 0xb6, 0x97, 0xab, 0xd7, 0xd0, 0xa6, 0x41, 0xa7, 0x93, 0x4a, + 0x11, 0x66, 0x78, 0x60, 0x8e, 0x97, 0x5a, 0x64, 0x3d, 0xaa, 0x89, 0x30, 0x10, 0x7d, 0x5d, 0x3a, + 0xaa, 0x2f, 0x74, 0x14, 0xb5, 0xbf, 0xd1, 0x72, 0x06, 0x8e, 0xfb, 0xdc, 0x69, 0x6c, 0x4e, 0x27, + 0x95, 0x75, 0x48, 0x53, 0xc0, 0x2c, 0x23, 0xed, 0xaa, 0xcb, 0x44, 0x3e, 0x8a, 0xb7, 0xf4, 0x31, + 0x73, 0x91, 0xc8, 0xc9, 0x1c, 0x27, 0xfd, 0x55, 0x23, 0x7a, 0xe4, 0x17, 0xb0, 0x83, 0xf6, 0x18, + 0xbb, 0x49, 0xa3, 0xea, 0x1b, 0xd2, 0xa1, 0xb9, 0x5c, 0xf6, 0x9e, 0xda, 0x1d, 0xdf, 0x95, 0x2d, + 0x5f, 0x8d, 0x8a, 0x41, 0x87, 0x05, 0xc4, 0xb0, 0xd0, 0x25, 0x75, 0x49, 0x51, 0xf6, 0xa6, 0x0a, + 0xa2, 0xf4, 0xff, 0x82, 0x88, 0x5b, 0xbf, 0x78, 0x3a, 0x43, 0x07, 0x73, 0xf4, 0xf4, 0x39, 0x29, + 0x58, 0x8e, 0xe3, 0x72, 0xd9, 0x3b, 0x4c, 0xdf, 0xac, 0x66, 0xea, 0x85, 0xbd, 0xcf, 0x97, 0xaf, + 0x4e, 0x39, 0xb4, 0x8d, 0x43, 0x45, 0xf1, 0xc4, 0xe1, 0xfe, 0x45, 0xe3, 0x7e, 0xe4, 0xbe, 0x90, + 0xd2, 0x40, 0xda, 0xd3, 0xd6, 0x67, 0xa4, 0x34, 0x6f, 0x45, 0x4b, 0x24, 0x33, 0xc0, 0x0b, 0x39, + 0xf6, 0xf3, 0x20, 0x3e, 0xe9, 0x03, 0x92, 0x1d, 0x5b, 0xc3, 0x00, 0xc3, 0x59, 0x0d, 0xe1, 0xe1, + 0x93, 0x3b, 0x07, 0x5a, 0xed, 0x85, 0x46, 0xf2, 0xd2, 0xf9, 0x91, 0xcd, 0x38, 0xfd, 0xfe, 0xc6, + 0xd6, 0x30, 0x96, 0xcb, 0x98, 0xb0, 0x96, 0x3b, 0xa3, 0x14, 0x45, 0x9c, 0x8b, 0x25, 0xa9, 0x8d, + 0x71, 0x46, 0xb2, 0x36, 0xc7, 0x11, 0xd3, 0xef, 0xc8, 0xf4, 0x98, 0xb7, 0x4c, 0x4f, 0x63, 0x3d, + 0x9e, 0xc3, 0x4d, 0xc1, 0x02, 0x21, 0x59, 0xed, 0x77, 0x8d, 0x14, 0xbf, 0xf0, 0xdd, 0xc0, 0x03, + 0x0c, 0x87, 0x0b, 0xa3, 0xef, 0x90, 0x6c, 0x4f, 0x48, 0xc2, 0x14, 0x28, 0xbb, 0x10, 0x16, 0xea, + 0xc4, 0xb0, 0xf2, 0x63, 0x0b, 0x19, 0x51, 0x34, 0xac, 0x12, 0x1a, 0x50, 0x7a, 0xfa, 0x48, 0x74, + 0x6a, 0x78, 0x38, 0xb6, 0x46, 0xc8, 0xf4, 0x8c, 0x34, 0x88, 0xfa, 0x2f, 0xa5, 0x80, 0x59, 0x5c, + 0xed, 0x97, 0x0c, 0xd9, 0x98, 0x1b, 0x3d, 0x74, 0x9b, 0xe4, 0x62, 0x50, 0x14, 0x61, 0x92, 0xb5, + 0x98, 0x0b, 0x12, 0x84, 0x98, 0x93, 0x8e, 0xa0, 0xf2, 0xac, 0x4e, 0xf4, 0xff, 0xd4, 0x9c, 0x3c, + 0x8e, 0x15, 0xa0, 0x30, 0x62, 0xb7, 0x88, 0x83, 0xdc, 0xb2, 0xa9, 0xdd, 0x22, 0xb0, 0x20, 0x35, + 0xb4, 0x41, 0x32, 0x81, 0xdd, 0x8d, 0x76, 0xe5, 0x4e, 0x04, 0xc8, 0xb4, 0x96, 0xdd, 0x93, 0xc2, + 0x58, 0x6c, 0x3d, 0xcb, 0xb3, 0x9f, 0xa1, 0xcf, 0x6c, 0xd7, 0x89, 0x16, 0x65, 0xb2, 0xf5, 0x0e, + 0x4f, 0x9a, 0x91, 0x06, 0x52, 0x28, 
0x7a, 0x48, 0x36, 0xe2, 0x6b, 0xc5, 0x86, 0xe1, 0xba, 0x7c, + 0x18, 0x19, 0x6e, 0xc0, 0xac, 0x1a, 0xe6, 0xf1, 0xf4, 0x43, 0x52, 0x60, 0x41, 0x3b, 0x49, 0x5f, + 0xb8, 0x3f, 0x93, 0x36, 0x39, 0x55, 0x2a, 0x48, 0xe3, 0x6a, 0xff, 0x68, 0xe4, 0xee, 0x89, 0x3b, + 0xb4, 0x3b, 0x17, 0x6f, 0xe0, 0x65, 0xf4, 0x0d, 0xc9, 0xfa, 0xc1, 0x10, 0xe3, 0x3a, 0xdf, 0x5f, + 0xbe, 0xce, 0xc3, 0x10, 0x21, 0x18, 0xa2, 0x2a, 0x5a, 0x71, 0x62, 0x10, 0x32, 0xd2, 0x47, 0x84, + 0xb8, 0x23, 0x9b, 0xcb, 0x69, 0x14, 0x17, 0xe1, 0x43, 0x19, 0x48, 0x22, 0x55, 0xef, 0x93, 0x14, + 0xb4, 0xf6, 0xa7, 0x46, 0x48, 0xc8, 0xfe, 0x06, 0x1a, 0xbd, 0x35, 0xdb, 0xe8, 0x3b, 0xb7, 0x4d, + 0xc0, 0x82, 0x4e, 0x7f, 0x91, 0x89, 0xef, 0x20, 0x72, 0xa2, 0x1e, 0xa0, 0xda, 0x32, 0x0f, 0xd0, + 0x0a, 0xc9, 0x8a, 0xa7, 0x44, 0xdc, 0xea, 0x79, 0x81, 0x14, 0xcf, 0x0c, 0x06, 0xa1, 0x9c, 0x1a, + 0x84, 0x88, 0x0f, 0x39, 0x23, 0xe2, 0xd4, 0x16, 0x45, 0x6a, 0x5b, 0x89, 0x14, 0x52, 0x08, 0x41, + 0x28, 0x1e, 0x6a, 0x4c, 0x5f, 0x55, 0x84, 0xe2, 0xfd, 0xc6, 0x20, 0x94, 0x53, 0x3b, 0x3d, 0x60, + 0xb2, 0x32, 0x13, 0x07, 0xcb, 0x67, 0x62, 0x76, 0xa4, 0xa9, 0x96, 0x7f, 0xed, 0x78, 0x32, 0x08, + 0x49, 0xfa, 0x9f, 0xe9, 0x77, 0x55, 0xec, 0xc9, 0x80, 0x60, 0x90, 0x42, 0xd0, 0x4f, 0xc9, 0x86, + 0xe3, 0x3a, 0x31, 0x55, 0x0b, 0x8e, 0x98, 0xbe, 0x26, 0x8d, 0xee, 0x8b, 0x26, 0x3c, 0x9e, 0x55, + 0xc1, 0x3c, 0x76, 0xae, 0x0a, 0x73, 0x4b, 0x57, 0x61, 0xc3, 0xb8, 0xbc, 0x2a, 0xaf, 0xbc, 0xbc, + 0x2a, 0xaf, 0xbc, 0xba, 0x2a, 0xaf, 0xfc, 0x3c, 0x2d, 0x6b, 0x97, 0xd3, 0xb2, 0xf6, 0x72, 0x5a, + 0xd6, 0x5e, 0x4d, 0xcb, 0xda, 0x5f, 0xd3, 0xb2, 0xf6, 0xdb, 0xdf, 0xe5, 0x95, 0x6f, 0x73, 0x71, + 0x12, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x19, 0xf8, 0xf6, 0xd0, 0x49, 0x0e, 0x00, 0x00, +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UserAgent) + copy(dAtA[i:], m.UserAgent) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserAgent))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + { + size, err := m.StageTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + { + size, err := m.RequestReceivedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + if 
m.ResponseObject != nil { + { + size, err := m.ResponseObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestObject != nil { + { + size, err := m.RequestObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if m.ResponseStatus != nil { + { + size, err := m.ResponseStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.ObjectRef != nil { + { + size, err := m.ObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.SourceIPs) > 0 { + for iNdEx := len(m.SourceIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceIPs[iNdEx]) + copy(dAtA[i:], m.SourceIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourceIPs[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if m.ImpersonatedUser != nil { + { + size, err := m.ImpersonatedUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x3a + i -= len(m.RequestURI) + copy(dAtA[i:], m.RequestURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestURI))) + i-- + dAtA[i] = 0x32 + i -= len(m.Stage) + copy(dAtA[i:], m.Stage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stage))) + i-- + dAtA[i] = 0x2a + i -= len(m.AuditID) + copy(dAtA[i:], m.AuditID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuditID))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x3a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.UserGroups) > 0 { + for iNdEx := len(m.UserGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UserGroups[iNdEx]) + copy(dAtA[i:], m.UserGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + 
copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuditID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Stage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RequestURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ImpersonatedUser != nil { + l = m.ImpersonatedUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.SourceIPs) > 0 { + for _, s := range m.SourceIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ObjectRef != nil { + l = m.ObjectRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseStatus != nil { + l = m.ResponseStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestObject != nil { + l = m.RequestObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseObject != nil { + l = m.ResponseObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.RequestReceivedTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.StageTimestamp.Size() + n += 2 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UserAgent) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.UserGroups) > 0 { + for _, s := range m.UserGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `AuditID:` + fmt.Sprintf("%v", this.AuditID) + `,`, + `Stage:` + fmt.Sprintf("%v", this.Stage) + `,`, + `RequestURI:` + fmt.Sprintf("%v", this.RequestURI) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `ImpersonatedUser:` + strings.Replace(fmt.Sprintf("%v", this.ImpersonatedUser), "UserInfo", "v11.UserInfo", 1) + `,`, + `SourceIPs:` + fmt.Sprintf("%v", this.SourceIPs) + `,`, + `ObjectRef:` + strings.Replace(this.ObjectRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ResponseStatus:` + strings.Replace(fmt.Sprintf("%v", this.ResponseStatus), "Status", "v1.Status", 1) + `,`, + `RequestObject:` + strings.Replace(fmt.Sprintf("%v", this.RequestObject), "Unknown", "runtime.Unknown", 1) + `,`, + `ResponseObject:` + 
strings.Replace(fmt.Sprintf("%v", this.ResponseObject), "Unknown", "runtime.Unknown", 1) + `,`, + `RequestReceivedTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequestReceivedTimestamp), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `StageTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StageTimestamp), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `UserAgent:` + fmt.Sprintf("%v", this.UserAgent) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *GroupResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupResources{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Policy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Policy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Policy", "Policy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]GroupResources{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "GroupResources", "GroupResources", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := 
strings.Join([]string{`&PolicyRule{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `UserGroups:` + fmt.Sprintf("%v", this.UserGroups) + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditID", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stage = Stage(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImpersonatedUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImpersonatedUser == nil { + m.ImpersonatedUser = &v11.UserInfo{} + } + if err := m.ImpersonatedUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceIPs = append(m.SourceIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectRef == nil { + m.ObjectRef = &ObjectReference{} + } + if err := m.ObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseStatus == nil { + m.ResponseStatus = &v1.Status{} + } + if err := m.ResponseStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestObject == nil { + m.RequestObject = &runtime.Unknown{} + } + if err := m.RequestObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
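Aside, not part of the vendored generated.pb.go: every field case in these Unmarshal methods repeats the same wire-format pattern, read a varint (the field tag or a length), bound-check it, then slice the length-delimited payload. Below is a minimal standalone Go sketch of that pattern; readVarint and readLengthDelimited are illustrative names and do not exist in the generated code.

package main

import (
	"errors"
	"fmt"
	"io"
)

// readVarint decodes a base-128 varint starting at data[i], mirroring the
// shift/0x7F loop the generated code inlines for every field.
func readVarint(data []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// readLengthDelimited reads a wire-type-2 payload: a varint length followed
// by that many bytes, with the same bounds checks the generated code performs.
func readLengthDelimited(data []byte, i int) (payload []byte, next int, err error) {
	length, i, err := readVarint(data, i)
	if err != nil {
		return nil, 0, err
	}
	end := i + int(length)
	if int(length) < 0 || end < 0 {
		return nil, 0, errors.New("negative length")
	}
	if end > len(data) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return data[i:end], end, nil
}

func main() {
	// Field 6 (RequestURI), wire type 2: tag = 6<<3 | 2 = 0x32, then a length-prefixed string.
	msg := []byte{0x32, 0x04, '/', 'a', 'p', 'i'}
	tag, i, _ := readVarint(msg, 0)
	payload, _, _ := readLengthDelimited(msg, i)
	fmt.Printf("field=%d wireType=%d value=%q\n", tag>>3, tag&0x7, payload)
}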
+ case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseObject == nil { + m.ResponseObject = &runtime.Unknown{} + } + if err := m.ResponseObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestReceivedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RequestReceivedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StageTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StageTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + 
if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserAgent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserAgent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 
+ for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: PolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Policy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserGroups = append(m.UserGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, GroupResources{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto new file mode 100644 index 0000000000..4d490ff96e --- 
/dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -0,0 +1,250 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apiserver.pkg.apis.audit.v1alpha1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Event captures all the information that can be included in an API audit log. +message Event { + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // AuditLevel at which event was generated + optional string level = 2; + + // Time the request reached the apiserver. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 3; + + // Unique audit ID, generated for each request. + optional string auditID = 4; + + // Stage of the request handling when this event instance was generated. + optional string stage = 5; + + // RequestURI is the request URI as sent by the client to a server. + optional string requestURI = 6; + + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. + optional string verb = 7; + + // Authenticated user information. + optional k8s.io.api.authentication.v1.UserInfo user = 8; + + // Impersonated user information. + // +optional + optional k8s.io.api.authentication.v1.UserInfo impersonatedUser = 9; + + // Source IPs, from where the request originated and intermediate proxies. + // +optional + repeated string sourceIPs = 10; + + // UserAgent records the user agent string reported by the client. + // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + optional string userAgent = 18; + + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. + // +optional + optional ObjectReference objectRef = 11; + + // The response status, populated even when the ResponseObject is not a Status type. + // For successful responses, this will only include the Code and StatusSuccess. + // For non-status type error responses, this will be auto-populated with the error Message. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 12; + + // API object from the request, in JSON format. The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. 
+ // +optional + optional k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 13; + + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 14; + + // Time the request reached the apiserver. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 15; + + // Time the request reached current audit stage. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 16; + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + map<string, string> annotations = 17; +} + +// EventList is a list of audit Events. +message EventList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Event items = 2; +} + +// GroupResources represents resource kinds in an API group. +message GroupResources { + // Group is the name of the API group that contains the resources. + // The empty string represents the core API group. + // +optional + optional string group = 1; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // An empty list implies all resources and subresources in this API groups apply. + // +optional + repeated string resources = 2; + + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + repeated string resourceNames = 3; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +message ObjectReference { + // +optional + optional string resource = 1; + + // +optional + optional string namespace = 2; + + // +optional + optional string name = 3; + + // +optional + optional string uid = 4; + + // +optional + optional string apiVersion = 5; + + // +optional + optional string resourceVersion = 6; + + // +optional + optional string subresource = 7; +} + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +message Policy { + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules specify the audit Level a request should be recorded at. + // A request may match multiple rules, in which case the FIRST matching rule is used.
+ // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + repeated PolicyRule rules = 2; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + repeated string omitStages = 3; +} + +// PolicyList is a list of audit Policies. +message PolicyList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Policy items = 2; +} + +// PolicyRule maps requests based off metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). +message PolicyRule { + // The Level that requests matching this rule are recorded at. + optional string level = 1; + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + repeated string users = 2; + + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + repeated string userGroups = 3; + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + repeated string verbs = 4; + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + repeated GroupResources resources = 5; + + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + repeated string namespaces = 6; + + // NonResourceURLs is a set of URL paths that should be audited. + // *s are allowed, but only as the full, final step in the path. + // Examples: + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks + // +optional + repeated string nonResourceURLs = 7; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. + // An empty list means no restrictions will apply. + // +optional + repeated string omitStages = 8; +} + diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/register.go new file mode 100644 index 0000000000..903e6a4ae7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
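For orientation on how the Policy and PolicyRule messages above fit together (rules are ordered, the first match decides the level, OmitStages drops whole stages), here is a hedged sketch that builds a Policy literal using the Go types vendored further down in types.go. The rule contents are invented examples; only the type and constant names come from the vendored package.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1"
)

func main() {
	policy := auditv1alpha1.Policy{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		// Rules are ordered; the FIRST matching rule decides the audit level.
		Rules: []auditv1alpha1.PolicyRule{
			{
				// Do not log health checks at all.
				Level:           auditv1alpha1.LevelNone,
				NonResourceURLs: []string{"/healthz*"},
			},
			{
				// Record request and response bodies for Secret writes.
				Level: auditv1alpha1.LevelRequestResponse,
				Verbs: []string{"create", "update", "patch", "delete"},
				Resources: []auditv1alpha1.GroupResources{
					{Group: "", Resources: []string{"secrets"}},
				},
			},
			{
				// Catch-all: metadata only for everything else.
				Level: auditv1alpha1.LevelMetadata,
			},
		},
		// Never emit events for the RequestReceived stage.
		OmitStages: []auditv1alpha1.Stage{auditv1alpha1.StageRequestReceived},
	}
	fmt.Printf("policy with %d rules\n", len(policy.Rules))
}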
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "audit.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + &Policy{}, + &PolicyList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go new file mode 100644 index 0000000000..4b4b7f25c6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go @@ -0,0 +1,287 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + authnv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// Header keys used by the audit system. +const ( + // Header to hold the audit ID as the request is propagated through the serving hierarchy. The + // Audit-ID header should be set by the first server to receive the request (e.g. the federation + // server or kube-aggregator). + // + // Audit ID is also returned to client by http response header. + // It's not guaranteed Audit-Id http header is sent for all requests. When kube-apiserver didn't + // audit the events according to the audit policy, no Audit-ID is returned. Also, for request to + // pods/exec, pods/attach, pods/proxy, kube-apiserver works like a proxy and redirect the request + // to kubelet node, users will only get http headers sent from kubelet node, so no Audit-ID is + // sent when users run command like "kubectl exec" or "kubectl attach". + HeaderAuditID = "Audit-ID" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. 
+ LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling that audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated as soon as the audit handler receives the request, and before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated once the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated once the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event captures all the information that can be included in an API audit log. +type Event struct { + metav1.TypeMeta `json:",inline"` + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // AuditLevel at which event was generated + Level Level `json:"level" protobuf:"bytes,2,opt,name=level,casttype=Level"` + + // Time the request reached the apiserver. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,3,opt,name=timestamp"` + // Unique audit ID, generated for each request. + AuditID types.UID `json:"auditID" protobuf:"bytes,4,opt,name=auditID,casttype=k8s.io/apimachinery/pkg/types.UID"` + // Stage of the request handling when this event instance was generated. + Stage Stage `json:"stage" protobuf:"bytes,5,opt,name=stage,casttype=Stage"` + + // RequestURI is the request URI as sent by the client to a server. + RequestURI string `json:"requestURI" protobuf:"bytes,6,opt,name=requestURI"` + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. + Verb string `json:"verb" protobuf:"bytes,7,opt,name=verb"` + // Authenticated user information. + User authnv1.UserInfo `json:"user" protobuf:"bytes,8,opt,name=user"` + // Impersonated user information. + // +optional + ImpersonatedUser *authnv1.UserInfo `json:"impersonatedUser,omitempty" protobuf:"bytes,9,opt,name=impersonatedUser"` + // Source IPs, from where the request originated and intermediate proxies. + // +optional + SourceIPs []string `json:"sourceIPs,omitempty" protobuf:"bytes,10,rep,name=sourceIPs"` + // UserAgent records the user agent string reported by the client. + // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + UserAgent string `json:"userAgent,omitempty" protobuf:"bytes,18,opt,name=userAgent"` + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. + // +optional + ObjectRef *ObjectReference `json:"objectRef,omitempty" protobuf:"bytes,11,opt,name=objectRef"` + // The response status, populated even when the ResponseObject is not a Status type. 
+ // For successful responses, this will only include the Code and StatusSuccess. + // For non-status type error responses, this will be auto-populated with the error Message. + // +optional + ResponseStatus *metav1.Status `json:"responseStatus,omitempty" protobuf:"bytes,12,opt,name=responseStatus"` + + // API object from the request, in JSON format. The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. + // +optional + RequestObject *runtime.Unknown `json:"requestObject,omitempty" protobuf:"bytes,13,opt,name=requestObject"` + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + ResponseObject *runtime.Unknown `json:"responseObject,omitempty" protobuf:"bytes,14,opt,name=responseObject"` + // Time the request reached the apiserver. + // +optional + RequestReceivedTimestamp metav1.MicroTime `json:"requestReceivedTimestamp" protobuf:"bytes,15,opt,name=requestReceivedTimestamp"` + // Time the request reached current audit stage. + // +optional + StageTimestamp metav1.MicroTime `json:"stageTimestamp" protobuf:"bytes,16,opt,name=stageTimestamp"` + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,17,rep,name=annotations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of audit Events. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +type Policy struct { + metav1.TypeMeta `json:",inline"` + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules specify the audit Level a request should be recorded at. + // A request may match multiple rules, in which case the FIRST matching rule is used. + // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. 
+ // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PolicyList is a list of audit Policies. +type PolicyList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []Policy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PolicyRule maps requests based off metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). +type PolicyRule struct { + // The Level that requests matching this rule are recorded at. + Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"` + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + Users []string `json:"users,omitempty" protobuf:"bytes,2,rep,name=users"` + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + UserGroups []string `json:"userGroups,omitempty" protobuf:"bytes,3,rep,name=userGroups"` + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + Verbs []string `json:"verbs,omitempty" protobuf:"bytes,4,rep,name=verbs"` + + // Rules can apply to API resources (such as "pods" or "secrets"), + // non-resource URL paths (such as "/api"), or neither, but not both. + // If neither is specified, the rule is treated as a default for all URLs. + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + Resources []GroupResources `json:"resources,omitempty" protobuf:"bytes,5,rep,name=resources"` + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"` + + // NonResourceURLs is a set of URL paths that should be audited. + // *s are allowed, but only as the full, final step in the path. + // Examples: + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. + // An empty list means no restrictions will apply. + // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` +} + +// GroupResources represents resource kinds in an API group. +type GroupResources struct { + // Group is the name of the API group that contains the resources. + // The empty string represents the core API group. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. 
+ // + // An empty list implies all resources and subresources in this API groups apply. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,3,rep,name=resourceNames"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,1,opt,name=resource"` + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,7,opt,name=subresource"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..74a1842053 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,339 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + audit "k8s.io/apiserver/pkg/apis/audit" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*EventList)(nil), (*audit.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_EventList_To_audit_EventList(a.(*EventList), b.(*audit.EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.EventList)(nil), (*EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_EventList_To_v1alpha1_EventList(a.(*audit.EventList), b.(*EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*GroupResources)(nil), (*audit.GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_GroupResources_To_audit_GroupResources(a.(*GroupResources), b.(*audit.GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.GroupResources)(nil), (*GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_GroupResources_To_v1alpha1_GroupResources(a.(*audit.GroupResources), b.(*GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Policy)(nil), (*audit.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Policy_To_audit_Policy(a.(*Policy), b.(*audit.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.Policy)(nil), (*Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Policy_To_v1alpha1_Policy(a.(*audit.Policy), b.(*Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyList)(nil), (*audit.PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PolicyList_To_audit_PolicyList(a.(*PolicyList), b.(*audit.PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyList)(nil), (*PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyList_To_v1alpha1_PolicyList(a.(*audit.PolicyList), b.(*PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyRule)(nil), (*audit.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PolicyRule_To_audit_PolicyRule(a.(*PolicyRule), b.(*audit.PolicyRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyRule)(nil), (*PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyRule_To_v1alpha1_PolicyRule(a.(*audit.PolicyRule), b.(*PolicyRule), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*audit.Event)(nil), (*Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Event_To_v1alpha1_Event(a.(*audit.Event), b.(*Event), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*audit.ObjectReference)(nil), (*ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_ObjectReference_To_v1alpha1_ObjectReference(a.(*audit.ObjectReference), b.(*ObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Event)(nil), (*audit.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_Event_To_audit_Event(a.(*Event), b.(*audit.Event), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ObjectReference)(nil), (*audit.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ObjectReference_To_audit_ObjectReference(a.(*ObjectReference), b.(*audit.ObjectReference), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + // WARNING: in.ObjectMeta requires manual conversion: does not exist in peer-type + out.Level = audit.Level(in.Level) + // WARNING: in.Timestamp requires manual conversion: does not exist in peer-type + out.AuditID = types.UID(in.AuditID) + out.Stage = audit.Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(audit.ObjectReference) + if err := Convert_v1alpha1_ObjectReference_To_audit_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.ObjectRef = nil + } + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +func autoConvert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + out.Level = Level(in.Level) + out.AuditID = types.UID(in.AuditID) + out.Stage = Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(ObjectReference) + if err := Convert_audit_ObjectReference_To_v1alpha1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.ObjectRef = nil + } + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +func autoConvert_v1alpha1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]audit.Event, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Event_To_audit_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_EventList_To_audit_EventList is an autogenerated conversion function. 
+func Convert_v1alpha1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + return autoConvert_v1alpha1_EventList_To_audit_EventList(in, out, s) +} + +func autoConvert_audit_EventList_To_v1alpha1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := Convert_audit_Event_To_v1alpha1_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_audit_EventList_To_v1alpha1_EventList is an autogenerated conversion function. +func Convert_audit_EventList_To_v1alpha1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + return autoConvert_audit_EventList_To_v1alpha1_EventList(in, out, s) +} + +func autoConvert_v1alpha1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_v1alpha1_GroupResources_To_audit_GroupResources is an autogenerated conversion function. +func Convert_v1alpha1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + return autoConvert_v1alpha1_GroupResources_To_audit_GroupResources(in, out, s) +} + +func autoConvert_audit_GroupResources_To_v1alpha1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_audit_GroupResources_To_v1alpha1_GroupResources is an autogenerated conversion function. +func Convert_audit_GroupResources_To_v1alpha1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + return autoConvert_audit_GroupResources_To_v1alpha1_GroupResources(in, out, s) +} + +func autoConvert_v1alpha1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +func autoConvert_audit_ObjectReference_To_v1alpha1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + // WARNING: in.APIGroup requires manual conversion: does not exist in peer-type + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +func autoConvert_v1alpha1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_v1alpha1_Policy_To_audit_Policy is an autogenerated conversion function. 
+func Convert_v1alpha1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + return autoConvert_v1alpha1_Policy_To_audit_Policy(in, out, s) +} + +func autoConvert_audit_Policy_To_v1alpha1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_audit_Policy_To_v1alpha1_Policy is an autogenerated conversion function. +func Convert_audit_Policy_To_v1alpha1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + return autoConvert_audit_Policy_To_v1alpha1_Policy(in, out, s) +} + +func autoConvert_v1alpha1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]audit.Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_PolicyList_To_audit_PolicyList is an autogenerated conversion function. +func Convert_v1alpha1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyList_To_audit_PolicyList(in, out, s) +} + +func autoConvert_audit_PolicyList_To_v1alpha1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_audit_PolicyList_To_v1alpha1_PolicyList is an autogenerated conversion function. +func Convert_audit_PolicyList_To_v1alpha1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + return autoConvert_audit_PolicyList_To_v1alpha1_PolicyList(in, out, s) +} + +func autoConvert_v1alpha1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + out.Level = audit.Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]audit.GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_v1alpha1_PolicyRule_To_audit_PolicyRule is an autogenerated conversion function. +func Convert_v1alpha1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRule_To_audit_PolicyRule(in, out, s) +} + +func autoConvert_audit_PolicyRule_To_v1alpha1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Level = Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_audit_PolicyRule_To_v1alpha1_PolicyRule is an autogenerated conversion function. 
+func Convert_audit_PolicyRule_To_v1alpha1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_audit_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..efb6cd87de --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,293 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + in.User.DeepCopyInto(&out.User) + if in.ImpersonatedUser != nil { + in, out := &in.ImpersonatedUser, &out.ImpersonatedUser + *out = new(v1.UserInfo) + (*in).DeepCopyInto(*out) + } + if in.SourceIPs != nil { + in, out := &in.SourceIPs, &out.SourceIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(ObjectReference) + **out = **in + } + if in.ResponseStatus != nil { + in, out := &in.ResponseStatus, &out.ResponseStatus + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.RequestObject != nil { + in, out := &in.RequestObject, &out.RequestObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + if in.ResponseObject != nil { + in, out := &in.ResponseObject, &out.ResponseObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResources) DeepCopyInto(out *GroupResources) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources. +func (in *GroupResources) DeepCopy() *GroupResources { + if in == nil { + return nil + } + out := new(GroupResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. 
+func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]GroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go new file mode 100644 index 0000000000..dd621a3acd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go new file mode 100644 index 0000000000..bccdcb7243 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apiserver/pkg/apis/audit" +) + +func Convert_v1beta1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + if err := autoConvert_v1beta1_Event_To_audit_Event(in, out, s); err != nil { + return err + } + if out.StageTimestamp.IsZero() { + out.StageTimestamp = metav1.NewMicroTime(in.CreationTimestamp.Time) + } + if out.RequestReceivedTimestamp.IsZero() { + out.RequestReceivedTimestamp = metav1.NewMicroTime(in.Timestamp.Time) + } + return nil +} + +func Convert_audit_Event_To_v1beta1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + if err := autoConvert_audit_Event_To_v1beta1_Event(in, out, s); err != nil { + return err + } + out.CreationTimestamp = metav1.NewTime(in.StageTimestamp.Time) + out.Timestamp = metav1.NewTime(in.RequestReceivedTimestamp.Time) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go new file mode 100644 index 0000000000..f3fe61683d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/audit +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=audit.k8s.io + +package v1beta1 // import "k8s.io/apiserver/pkg/apis/audit/v1beta1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go new file mode 100644 index 0000000000..14870e4294 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go @@ -0,0 +1,3282 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/authentication/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_c7e4d52063960930, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_c7e4d52063960930, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo + +func (m *GroupResources) Reset() { *m = GroupResources{} } +func (*GroupResources) ProtoMessage() {} +func (*GroupResources) Descriptor() ([]byte, []int) { + return fileDescriptor_c7e4d52063960930, []int{2} +} +func (m *GroupResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResources.Merge(m, src) +} +func (m *GroupResources) XXX_Size() int { + return m.Size() +} +func (m *GroupResources) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResources.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupResources proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return 
fileDescriptor_c7e4d52063960930, []int{3} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_c7e4d52063960930, []int{4} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *PolicyList) Reset() { *m = PolicyList{} } +func (*PolicyList) ProtoMessage() {} +func (*PolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_c7e4d52063960930, []int{5} +} +func (m *PolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyList.Merge(m, src) +} +func (m *PolicyList) XXX_Size() int { + return m.Size() +} +func (m *PolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyList proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_c7e4d52063960930, []int{6} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.Event") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.Event.AnnotationsEntry") + proto.RegisterType((*EventList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.EventList") + proto.RegisterType((*GroupResources)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.GroupResources") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.ObjectReference") + 
proto.RegisterType((*Policy)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.Policy") + proto.RegisterType((*PolicyList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.PolicyList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.apiserver.pkg.apis.audit.v1beta1.PolicyRule") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto", fileDescriptor_c7e4d52063960930) +} + +var fileDescriptor_c7e4d52063960930 = []byte{ + // 1279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xd6, 0x71, 0x63, 0x8f, 0x1b, 0xc7, 0x9d, 0x56, 0x74, 0x95, 0x83, 0x6d, 0x8c, 0x04, + 0x11, 0xa4, 0xbb, 0x4d, 0x28, 0x24, 0x42, 0x02, 0x64, 0xab, 0x15, 0x58, 0x4a, 0x43, 0x34, 0x8e, + 0x2b, 0x04, 0x1c, 0x58, 0xdb, 0x2f, 0xf6, 0x62, 0x7b, 0x77, 0xd9, 0x99, 0x35, 0xca, 0x8d, 0x2f, + 0x80, 0xc4, 0x9d, 0xcf, 0xc0, 0x85, 0x0f, 0x50, 0x71, 0xcc, 0xb1, 0xc7, 0x9e, 0x2c, 0x62, 0xbe, + 0x45, 0x24, 0x24, 0x34, 0x7f, 0x76, 0x67, 0xed, 0xd4, 0xc2, 0xe1, 0xd0, 0xdb, 0xce, 0x7b, 0xbf, + 0xf7, 0x9b, 0x37, 0xbf, 0x7d, 0xf3, 0xde, 0xa0, 0x93, 0xe1, 0x21, 0xb5, 0x5c, 0xdf, 0x1e, 0x46, + 0x1d, 0x08, 0x3d, 0x60, 0x40, 0xed, 0x09, 0x78, 0x3d, 0x3f, 0xb4, 0x95, 0xc3, 0x09, 0x5c, 0x0a, + 0xe1, 0x04, 0x42, 0x3b, 0x18, 0xf6, 0xc5, 0xca, 0x76, 0xa2, 0x9e, 0xcb, 0xec, 0xc9, 0x5e, 0x07, + 0x98, 0xb3, 0x67, 0xf7, 0xc1, 0x83, 0xd0, 0x61, 0xd0, 0xb3, 0x82, 0xd0, 0x67, 0x3e, 0x7e, 0x4f, + 0x06, 0x5a, 0x49, 0xa0, 0x15, 0x0c, 0xfb, 0x62, 0x65, 0x89, 0x40, 0x4b, 0x05, 0x6e, 0x3f, 0xec, + 0xbb, 0x6c, 0x10, 0x75, 0xac, 0xae, 0x3f, 0xb6, 0xfb, 0x7e, 0xdf, 0xb7, 0x45, 0x7c, 0x27, 0x3a, + 0x13, 0x2b, 0xb1, 0x10, 0x5f, 0x92, 0x77, 0x7b, 0x57, 0x27, 0x64, 0x3b, 0x11, 0x1b, 0x80, 0xc7, + 0xdc, 0xae, 0xc3, 0x5c, 0xdf, 0xb3, 0x27, 0xd7, 0xb2, 0xd8, 0x7e, 0xac, 0xd1, 0x63, 0xa7, 0x3b, + 0x70, 0x3d, 0x08, 0xcf, 0xf5, 0x09, 0xc6, 0xc0, 0x9c, 0xd7, 0x45, 0xd9, 0xcb, 0xa2, 0xc2, 0xc8, + 0x63, 0xee, 0x18, 0xae, 0x05, 0x7c, 0xfc, 0x5f, 0x01, 0xb4, 0x3b, 0x80, 0xb1, 0xb3, 0x18, 0x57, + 0xfb, 0xfd, 0x0e, 0xca, 0x3e, 0x9d, 0x80, 0xc7, 0xf0, 0xf7, 0x28, 0xc7, 0xb3, 0xe9, 0x39, 0xcc, + 0x31, 0x8d, 0xaa, 0xb1, 0x53, 0xd8, 0x7f, 0x64, 0x69, 0x05, 0x13, 0x52, 0x2d, 0x22, 0x47, 0x5b, + 0x93, 0x3d, 0xeb, 0xab, 0xce, 0x0f, 0xd0, 0x65, 0xcf, 0x80, 0x39, 0x0d, 0x7c, 0x31, 0xad, 0xac, + 0xcd, 0xa6, 0x15, 0xa4, 0x6d, 0x24, 0x61, 0xc5, 0xbb, 0x28, 0x3b, 0x82, 0x09, 0x8c, 0xcc, 0x5b, + 0x55, 0x63, 0x27, 0xdf, 0x78, 0x4b, 0x81, 0xb3, 0x47, 0xdc, 0x78, 0x15, 0x7f, 0x10, 0x09, 0xc2, + 0xdf, 0xa2, 0x3c, 0x4f, 0x9c, 0x32, 0x67, 0x1c, 0x98, 0x19, 0x91, 0xd0, 0xfb, 0xab, 0x25, 0x74, + 0xea, 0x8e, 0xa1, 0x71, 0x57, 0xb1, 0xe7, 0x4f, 0x63, 0x12, 0xa2, 0xf9, 0xf0, 0x31, 0xda, 0x10, + 0x35, 0xd0, 0x7c, 0x62, 0xae, 0x8b, 0x64, 0x1e, 0x2b, 0xf8, 0x46, 0x5d, 0x9a, 0xaf, 0xa6, 0x95, + 0xb7, 0x97, 0x49, 0xca, 0xce, 0x03, 0xa0, 0x56, 0xbb, 0xf9, 0x84, 0xc4, 0x24, 0xfc, 0x68, 0x94, + 0x39, 0x7d, 0x30, 0xb3, 0xf3, 0x47, 0x6b, 0x71, 0xe3, 0x55, 0xfc, 0x41, 0x24, 0x08, 0xef, 0x23, + 0x14, 0xc2, 0x8f, 0x11, 0x50, 0xd6, 0x26, 0x4d, 0xf3, 0xb6, 0x08, 0x49, 0xa4, 0x23, 0x89, 0x87, + 0xa4, 0x50, 0xb8, 0x8a, 0xd6, 0x27, 0x10, 0x76, 0xcc, 0x0d, 0x81, 0xbe, 0xa3, 0xd0, 0xeb, 0xcf, + 0x21, 0xec, 0x10, 0xe1, 0xc1, 0x5f, 0xa2, 0xf5, 0x88, 0x42, 0x68, 0xe6, 0x84, 0x56, 0xef, 0xa6, + 0xb4, 0xb2, 0xe6, 0xcb, 0x94, 0x6b, 0xd4, 0xa6, 0x10, 0x36, 0xbd, 0x33, 0x5f, 0x33, 0x71, 0x0b, + 0x11, 0x0c, 0x78, 0x80, 0x4a, 0xee, 0x38, 0x80, 0x90, 0xfa, 0x1e, 0x2f, 0x15, 
0xee, 0x31, 0xf3, + 0x37, 0x62, 0xbd, 0x3f, 0x9b, 0x56, 0x4a, 0xcd, 0x05, 0x0e, 0x72, 0x8d, 0x15, 0x7f, 0x80, 0xf2, + 0xd4, 0x8f, 0xc2, 0x2e, 0x34, 0x4f, 0xa8, 0x89, 0xaa, 0x99, 0x9d, 0x7c, 0x63, 0x93, 0xff, 0xb4, + 0x56, 0x6c, 0x24, 0xda, 0x8f, 0x6d, 0x94, 0xe7, 0xe9, 0xd5, 0xfb, 0xe0, 0x31, 0x13, 0x0b, 0x1d, + 0x92, 0xbf, 0xdc, 0x8e, 0x1d, 0x44, 0x63, 0x30, 0xa0, 0xbc, 0x2f, 0x0a, 0x91, 0xc0, 0x99, 0x59, + 0x10, 0x07, 0x38, 0xb4, 0x56, 0xec, 0x0a, 0xaa, 0xac, 0x09, 0x9c, 0x41, 0x08, 0x5e, 0x17, 0x64, + 0x5e, 0x89, 0x91, 0x68, 0x66, 0x3c, 0x40, 0xc5, 0x10, 0x68, 0xe0, 0x7b, 0x14, 0x5a, 0xcc, 0x61, + 0x11, 0x35, 0xef, 0x88, 0xbd, 0x76, 0x57, 0x2b, 0x57, 0x19, 0xd3, 0xc0, 0xb3, 0x69, 0xa5, 0x48, + 0xe6, 0x78, 0xc8, 0x02, 0x2f, 0x76, 0xd0, 0xa6, 0x2a, 0x09, 0x99, 0x88, 0xb9, 0x29, 0x36, 0xda, + 0x59, 0xba, 0x91, 0xba, 0xfd, 0x56, 0xdb, 0x1b, 0x7a, 0xfe, 0x4f, 0x5e, 0xe3, 0xee, 0x6c, 0x5a, + 0xd9, 0x24, 0x69, 0x0a, 0x32, 0xcf, 0x88, 0x7b, 0xfa, 0x30, 0x6a, 0x8f, 0xe2, 0x0d, 0xf7, 0x98, + 0x3b, 0x88, 0xda, 0x64, 0x81, 0x13, 0xff, 0x62, 0x20, 0x53, 0xed, 0x4b, 0xa0, 0x0b, 0xee, 0x04, + 0x7a, 0xc9, 0x3d, 0x35, 0xb7, 0xc4, 0x86, 0xf6, 0x6a, 0xea, 0x3d, 0x73, 0xbb, 0xa1, 0x2f, 0x6e, + 0x7c, 0x55, 0xd5, 0x82, 0x49, 0x96, 0x10, 0x93, 0xa5, 0x5b, 0x62, 0x1f, 0x15, 0xc5, 0xd5, 0xd4, + 0x49, 0x94, 0xfe, 0x5f, 0x12, 0xf1, 0xcd, 0x2f, 0xb6, 0xe6, 0xe8, 0xc8, 0x02, 0x3d, 0x9e, 0xa0, + 0x82, 0xe3, 0x79, 0x3e, 0x13, 0x57, 0x87, 0x9a, 0x77, 0xab, 0x99, 0x9d, 0xc2, 0xfe, 0xe7, 0x2b, + 0x17, 0xa7, 0x68, 0xd9, 0x56, 0x5d, 0x33, 0x3c, 0xf5, 0x58, 0x78, 0xde, 0xb8, 0xa7, 0x76, 0x2f, + 0xa4, 0x3c, 0x24, 0xbd, 0xd1, 0xf6, 0x67, 0xa8, 0xb4, 0x18, 0x85, 0x4b, 0x28, 0x33, 0x84, 0x73, + 0xd1, 0xf4, 0xf3, 0x84, 0x7f, 0xe2, 0xfb, 0x28, 0x3b, 0x71, 0x46, 0x11, 0xc8, 0x4e, 0x4d, 0xe4, + 0xe2, 0x93, 0x5b, 0x87, 0x46, 0xed, 0x85, 0x81, 0xf2, 0x62, 0xf3, 0x23, 0x97, 0x32, 0xfc, 0xdd, + 0xb5, 0x99, 0x61, 0xad, 0x26, 0x18, 0x8f, 0x16, 0x13, 0xa3, 0xa4, 0x32, 0xce, 0xc5, 0x96, 0xd4, + 0xbc, 0x68, 0xa1, 0xac, 0xcb, 0x60, 0x4c, 0xcd, 0x5b, 0x42, 0x1d, 0xeb, 0x66, 0xea, 0x34, 0x36, + 0xe3, 0x26, 0xdc, 0xe4, 0x24, 0x44, 0x72, 0xd5, 0x7e, 0x33, 0x50, 0xf1, 0x8b, 0xd0, 0x8f, 0x02, + 0x02, 0xb2, 0xb3, 0x50, 0xfc, 0x0e, 0xca, 0xf6, 0xb9, 0x45, 0x2a, 0xa0, 0xe3, 0x24, 0x4c, 0xfa, + 0x78, 0xa7, 0x0a, 0xe3, 0x08, 0x91, 0x90, 0xea, 0x54, 0x09, 0x0d, 0xd1, 0x7e, 0x7c, 0xc0, 0xef, + 0xa9, 0x5c, 0x1c, 0x3b, 0x63, 0xa0, 0x66, 0x46, 0x04, 0xa8, 0xdb, 0x97, 0x72, 0x90, 0x79, 0x5c, + 0xed, 0x8f, 0x0c, 0xda, 0x5a, 0x68, 0x3c, 0x78, 0x17, 0xe5, 0x62, 0x90, 0xca, 0x30, 0x11, 0x2d, + 0xe6, 0x22, 0x09, 0x82, 0x37, 0x49, 0x8f, 0x53, 0x05, 0x4e, 0x57, 0xfd, 0x3e, 0xdd, 0x24, 0x8f, + 0x63, 0x07, 0xd1, 0x18, 0x3e, 0x58, 0xf8, 0x42, 0x8c, 0xd8, 0xd4, 0x60, 0xe1, 0x58, 0x22, 0x3c, + 0xb8, 0x81, 0x32, 0x91, 0xdb, 0x53, 0x83, 0xf2, 0x91, 0x02, 0x64, 0xda, 0xab, 0x0e, 0x49, 0x1e, + 0xcc, 0x0f, 0xe1, 0x04, 0xae, 0x50, 0x54, 0xcd, 0xc8, 0xe4, 0x10, 0xf5, 0x93, 0xa6, 0x54, 0x3a, + 0x41, 0xf0, 0x01, 0xe9, 0x04, 0xee, 0x73, 0x08, 0xa9, 0xeb, 0x7b, 0x8b, 0x03, 0xb2, 0x7e, 0xd2, + 0x54, 0x1e, 0x92, 0x42, 0xe1, 0x3a, 0xda, 0x8a, 0x45, 0x88, 0x03, 0xe5, 0xac, 0x7c, 0xa0, 0x02, + 0xb7, 0xc8, 0xbc, 0x9b, 0x2c, 0xe2, 0xf1, 0x47, 0xa8, 0x40, 0xa3, 0x4e, 0x22, 0x76, 0x4e, 0x84, + 0x27, 0x77, 0xaa, 0xa5, 0x5d, 0x24, 0x8d, 0xab, 0xfd, 0x63, 0xa0, 0xdb, 0x27, 0xfe, 0xc8, 0xed, + 0x9e, 0xbf, 0x81, 0x47, 0xd4, 0xd7, 0x28, 0x1b, 0x46, 0x23, 0x88, 0x2f, 0xc5, 0x87, 0x2b, 0x5f, + 0x0a, 0x99, 0x21, 0x89, 0x46, 0xa0, 0x2b, 0x9c, 0xaf, 0x28, 0x91, 0x84, 0xf8, 0x00, 0x21, 0x7f, + 0xec, 
0x32, 0xd1, 0xb8, 0xe2, 0x8a, 0x7d, 0x20, 0xf2, 0x48, 0xac, 0xfa, 0x25, 0x93, 0x82, 0xd6, + 0xfe, 0x34, 0x10, 0x92, 0xec, 0x6f, 0xa0, 0x29, 0x9c, 0xce, 0x37, 0x05, 0xfb, 0x86, 0xe7, 0x5f, + 0xd2, 0x15, 0x5e, 0x64, 0xe2, 0x23, 0x70, 0x49, 0xf4, 0x4b, 0xd5, 0x58, 0xe5, 0xa5, 0x5a, 0x41, + 0x59, 0xfe, 0xe6, 0x88, 0xdb, 0x42, 0x9e, 0x23, 0xf9, 0x7b, 0x84, 0x12, 0x69, 0xc7, 0x16, 0x42, + 0xfc, 0x43, 0xd4, 0x76, 0xac, 0x6c, 0x91, 0x2b, 0xdb, 0x4e, 0xac, 0x24, 0x85, 0xe0, 0x84, 0xfc, + 0x45, 0x47, 0xcd, 0x75, 0x4d, 0xc8, 0x1f, 0x7a, 0x94, 0x48, 0x3b, 0x1e, 0xa4, 0x9b, 0x51, 0x56, + 0x08, 0x71, 0xb0, 0xb2, 0x10, 0xf3, 0xdd, 0x4f, 0x77, 0x87, 0xd7, 0x76, 0x32, 0x0b, 0xa1, 0xa4, + 0x55, 0x50, 0xf3, 0xb6, 0x4e, 0x3d, 0xe9, 0x25, 0x94, 0xa4, 0x10, 0xf8, 0x53, 0xb4, 0xe5, 0xf9, + 0x5e, 0x4c, 0xd5, 0x26, 0x47, 0xd4, 0xdc, 0x10, 0x41, 0xf7, 0xf8, 0x0d, 0x3c, 0x9e, 0x77, 0x91, + 0x45, 0xec, 0x42, 0x0d, 0xe6, 0x56, 0xae, 0xc1, 0xc6, 0xc3, 0x8b, 0xcb, 0xf2, 0xda, 0xcb, 0xcb, + 0xf2, 0xda, 0xab, 0xcb, 0xf2, 0xda, 0xcf, 0xb3, 0xb2, 0x71, 0x31, 0x2b, 0x1b, 0x2f, 0x67, 0x65, + 0xe3, 0xd5, 0xac, 0x6c, 0xfc, 0x35, 0x2b, 0x1b, 0xbf, 0xfe, 0x5d, 0x5e, 0xfb, 0x66, 0x43, 0x69, + 0xf0, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4f, 0xae, 0x4a, 0x9d, 0x6e, 0x0e, 0x00, 0x00, +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UserAgent) + copy(dAtA[i:], m.UserAgent) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserAgent))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + { + size, err := m.StageTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + { + size, err := m.RequestReceivedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + if m.ResponseObject != nil { + { + size, err := m.ResponseObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestObject != nil { + { + size, err := m.RequestObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if 
m.ResponseStatus != nil { + { + size, err := m.ResponseStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.ObjectRef != nil { + { + size, err := m.ObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.SourceIPs) > 0 { + for iNdEx := len(m.SourceIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceIPs[iNdEx]) + copy(dAtA[i:], m.SourceIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourceIPs[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if m.ImpersonatedUser != nil { + { + size, err := m.ImpersonatedUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x3a + i -= len(m.RequestURI) + copy(dAtA[i:], m.RequestURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestURI))) + i-- + dAtA[i] = 0x32 + i -= len(m.Stage) + copy(dAtA[i:], m.Stage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stage))) + i-- + dAtA[i] = 0x2a + i -= len(m.AuditID) + copy(dAtA[i:], m.AuditID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuditID))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResources) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x42 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x3a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.UserGroups) > 0 { + for iNdEx := len(m.UserGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UserGroups[iNdEx]) + copy(dAtA[i:], m.UserGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, 
v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuditID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Stage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RequestURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ImpersonatedUser != nil { + l = m.ImpersonatedUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.SourceIPs) > 0 { + for _, s := range m.SourceIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ObjectRef != nil { + l = m.ObjectRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseStatus != nil { + l = m.ResponseStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestObject != nil { + l = m.RequestObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseObject != nil { + l = m.ResponseObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.RequestReceivedTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.StageTimestamp.Size() + n += 2 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UserAgent) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.UserGroups) > 0 { + for _, s := range m.UserGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `AuditID:` + fmt.Sprintf("%v", this.AuditID) + `,`, + `Stage:` + fmt.Sprintf("%v", this.Stage) + `,`, + `RequestURI:` + fmt.Sprintf("%v", this.RequestURI) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `ImpersonatedUser:` + strings.Replace(fmt.Sprintf("%v", this.ImpersonatedUser), "UserInfo", "v11.UserInfo", 1) + `,`, + `SourceIPs:` + fmt.Sprintf("%v", this.SourceIPs) + `,`, + `ObjectRef:` + strings.Replace(this.ObjectRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ResponseStatus:` + strings.Replace(fmt.Sprintf("%v", this.ResponseStatus), "Status", "v1.Status", 1) + `,`, + `RequestObject:` + strings.Replace(fmt.Sprintf("%v", this.RequestObject), "Unknown", "runtime.Unknown", 1) + `,`, + `ResponseObject:` + strings.Replace(fmt.Sprintf("%v", this.ResponseObject), "Unknown", "runtime.Unknown", 1) + `,`, + `RequestReceivedTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequestReceivedTimestamp), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + 
`StageTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StageTimestamp), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `UserAgent:` + fmt.Sprintf("%v", this.UserAgent) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *GroupResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupResources{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Policy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Policy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Policy", "Policy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]GroupResources{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "GroupResources", "GroupResources", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&PolicyRule{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `UserGroups:` + fmt.Sprintf("%v", this.UserGroups) + `,`, + `Verbs:` + fmt.Sprintf("%v", 
this.Verbs) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stage = Stage(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImpersonatedUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImpersonatedUser == nil { + m.ImpersonatedUser = &v11.UserInfo{} + } + if err := m.ImpersonatedUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceIPs = append(m.SourceIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectRef == nil { + m.ObjectRef = &ObjectReference{} + } + if err := m.ObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseStatus == nil { + m.ResponseStatus = &v1.Status{} + } + if err := m.ResponseStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestObject == nil { + m.RequestObject = &runtime.Unknown{} + } + if err := m.RequestObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseObject == nil { + m.ResponseObject = &runtime.Unknown{} + } + if err := m.ResponseObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestReceivedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RequestReceivedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StageTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StageTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return 
io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserAgent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserAgent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) 
+ iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *PolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Policy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserGroups = append(m.UserGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, GroupResources{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF 
+} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto new file mode 100644 index 0000000000..95bfb8cec5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto @@ -0,0 +1,259 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apiserver.pkg.apis.audit.v1beta1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Event captures all the information that can be included in an API audit log. +message Event { + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + // DEPRECATED: Use StageTimestamp which supports micro second instead of ObjectMeta.CreateTimestamp + // and the rest of the object is not used + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // AuditLevel at which event was generated + optional string level = 2; + + // Time the request reached the apiserver. + // DEPRECATED: Use RequestReceivedTimestamp which supports micro second instead. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 3; + + // Unique audit ID, generated for each request. + optional string auditID = 4; + + // Stage of the request handling when this event instance was generated. + optional string stage = 5; + + // RequestURI is the request URI as sent by the client to a server. + optional string requestURI = 6; + + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. + optional string verb = 7; + + // Authenticated user information. + optional k8s.io.api.authentication.v1.UserInfo user = 8; + + // Impersonated user information. + // +optional + optional k8s.io.api.authentication.v1.UserInfo impersonatedUser = 9; + + // Source IPs, from where the request originated and intermediate proxies. + // +optional + repeated string sourceIPs = 10; + + // UserAgent records the user agent string reported by the client. + // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + optional string userAgent = 18; + + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. 
+ // +optional + optional ObjectReference objectRef = 11; + + // The response status, populated even when the ResponseObject is not a Status type. + // For successful responses, this will only include the Code and StatusSuccess. + // For non-status type error responses, this will be auto-populated with the error Message. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 12; + + // API object from the request, in JSON format. The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. + // +optional + optional k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 13; + + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 14; + + // Time the request reached the apiserver. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 15; + + // Time the request reached current audit stage. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 16; + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + map annotations = 17; +} + +// EventList is a list of audit Events. +message EventList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Event items = 2; +} + +// GroupResources represents resource kinds in an API group. +message GroupResources { + // Group is the name of the API group that contains the resources. + // The empty string represents the core API group. + // +optional + optional string group = 1; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // An empty list implies all resources and subresources in this API groups apply. + // +optional + repeated string resources = 2; + + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + repeated string resourceNames = 3; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. 
+message ObjectReference { + // +optional + optional string resource = 1; + + // +optional + optional string namespace = 2; + + // +optional + optional string name = 3; + + // +optional + optional string uid = 4; + + // APIGroup is the name of the API group that contains the referred object. + // The empty string represents the core API group. + // +optional + optional string apiGroup = 5; + + // APIVersion is the version of the API group that contains the referred object. + // +optional + optional string apiVersion = 6; + + // +optional + optional string resourceVersion = 7; + + // +optional + optional string subresource = 8; +} + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +message Policy { + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules specify the audit Level a request should be recorded at. + // A request may match multiple rules, in which case the FIRST matching rule is used. + // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + repeated PolicyRule rules = 2; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + repeated string omitStages = 3; +} + +// PolicyList is a list of audit Policies. +message PolicyList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Policy items = 2; +} + +// PolicyRule maps requests based off metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). +message PolicyRule { + // The Level that requests matching this rule are recorded at. + optional string level = 1; + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + repeated string users = 2; + + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + repeated string userGroups = 3; + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + repeated string verbs = 4; + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + repeated GroupResources resources = 5; + + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + repeated string namespaces = 6; + + // NonResourceURLs is a set of URL paths that should be audited. + // *s are allowed, but only as the full, final step in the path. + // Examples: + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks + // +optional + repeated string nonResourceURLs = 7; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. + // An empty list means no restrictions will apply. 
+ // +optional + repeated string omitStages = 8; +} + diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/register.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/register.go new file mode 100644 index 0000000000..1bb1a50f29 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "audit.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + &Policy{}, + &PolicyList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go new file mode 100644 index 0000000000..0317cf6ec5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go @@ -0,0 +1,288 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authnv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// Header keys used by the audit system. +const ( + // Header to hold the audit ID as the request is propagated through the serving hierarchy. The + // Audit-ID header should be set by the first server to receive the request (e.g. the federation + // server or kube-aggregator). 
+ HeaderAuditID = "Audit-ID" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling that audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated as soon as the audit handler receives the request, and before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated once the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated once the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event captures all the information that can be included in an API audit log. +type Event struct { + metav1.TypeMeta `json:",inline"` + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + // DEPRECATED: Use StageTimestamp which supports micro second instead of ObjectMeta.CreateTimestamp + // and the rest of the object is not used + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // AuditLevel at which event was generated + Level Level `json:"level" protobuf:"bytes,2,opt,name=level,casttype=Level"` + + // Time the request reached the apiserver. + // DEPRECATED: Use RequestReceivedTimestamp which supports micro second instead. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,3,opt,name=timestamp"` + // Unique audit ID, generated for each request. + AuditID types.UID `json:"auditID" protobuf:"bytes,4,opt,name=auditID,casttype=k8s.io/apimachinery/pkg/types.UID"` + // Stage of the request handling when this event instance was generated. + Stage Stage `json:"stage" protobuf:"bytes,5,opt,name=stage,casttype=Stage"` + + // RequestURI is the request URI as sent by the client to a server. + RequestURI string `json:"requestURI" protobuf:"bytes,6,opt,name=requestURI"` + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. + Verb string `json:"verb" protobuf:"bytes,7,opt,name=verb"` + // Authenticated user information. + User authnv1.UserInfo `json:"user" protobuf:"bytes,8,opt,name=user"` + // Impersonated user information. + // +optional + ImpersonatedUser *authnv1.UserInfo `json:"impersonatedUser,omitempty" protobuf:"bytes,9,opt,name=impersonatedUser"` + // Source IPs, from where the request originated and intermediate proxies. + // +optional + SourceIPs []string `json:"sourceIPs,omitempty" protobuf:"bytes,10,rep,name=sourceIPs"` + // UserAgent records the user agent string reported by the client. 
+ // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + UserAgent string `json:"userAgent,omitempty" protobuf:"bytes,18,opt,name=userAgent"` + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. + // +optional + ObjectRef *ObjectReference `json:"objectRef,omitempty" protobuf:"bytes,11,opt,name=objectRef"` + // The response status, populated even when the ResponseObject is not a Status type. + // For successful responses, this will only include the Code and StatusSuccess. + // For non-status type error responses, this will be auto-populated with the error Message. + // +optional + ResponseStatus *metav1.Status `json:"responseStatus,omitempty" protobuf:"bytes,12,opt,name=responseStatus"` + + // API object from the request, in JSON format. The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. + // +optional + RequestObject *runtime.Unknown `json:"requestObject,omitempty" protobuf:"bytes,13,opt,name=requestObject"` + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + ResponseObject *runtime.Unknown `json:"responseObject,omitempty" protobuf:"bytes,14,opt,name=responseObject"` + // Time the request reached the apiserver. + // +optional + RequestReceivedTimestamp metav1.MicroTime `json:"requestReceivedTimestamp" protobuf:"bytes,15,opt,name=requestReceivedTimestamp"` + // Time the request reached current audit stage. + // +optional + StageTimestamp metav1.MicroTime `json:"stageTimestamp" protobuf:"bytes,16,opt,name=stageTimestamp"` + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,17,rep,name=annotations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of audit Events. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +type Policy struct { + metav1.TypeMeta `json:",inline"` + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules specify the audit Level a request should be recorded at. 
+ // A request may match multiple rules, in which case the FIRST matching rule is used. + // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PolicyList is a list of audit Policies. +type PolicyList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []Policy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PolicyRule maps requests based off metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). +type PolicyRule struct { + // The Level that requests matching this rule are recorded at. + Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"` + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + Users []string `json:"users,omitempty" protobuf:"bytes,2,rep,name=users"` + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + UserGroups []string `json:"userGroups,omitempty" protobuf:"bytes,3,rep,name=userGroups"` + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + Verbs []string `json:"verbs,omitempty" protobuf:"bytes,4,rep,name=verbs"` + + // Rules can apply to API resources (such as "pods" or "secrets"), + // non-resource URL paths (such as "/api"), or neither, but not both. + // If neither is specified, the rule is treated as a default for all URLs. + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + Resources []GroupResources `json:"resources,omitempty" protobuf:"bytes,5,rep,name=resources"` + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"` + + // NonResourceURLs is a set of URL paths that should be audited. + // *s are allowed, but only as the full, final step in the path. + // Examples: + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. + // An empty list means no restrictions will apply. + // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` +} + +// GroupResources represents resource kinds in an API group. +type GroupResources struct { + // Group is the name of the API group that contains the resources. + // The empty string represents the core API group. 
+ // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // An empty list implies all resources and subresources in this API groups apply. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,3,rep,name=resourceNames"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,1,opt,name=resource"` + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // APIGroup is the name of the API group that contains the referred object. + // The empty string represents the core API group. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,5,opt,name=apiGroup"` + // APIVersion is the version of the API group that contains the referred object. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,6,opt,name=apiVersion"` + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,7,opt,name=resourceVersion"` + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,8,opt,name=subresource"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..0aac32119e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go @@ -0,0 +1,334 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + audit "k8s.io/apiserver/pkg/apis/audit" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*EventList)(nil), (*audit.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EventList_To_audit_EventList(a.(*EventList), b.(*audit.EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.EventList)(nil), (*EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_EventList_To_v1beta1_EventList(a.(*audit.EventList), b.(*EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*GroupResources)(nil), (*audit.GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_GroupResources_To_audit_GroupResources(a.(*GroupResources), b.(*audit.GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.GroupResources)(nil), (*GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_GroupResources_To_v1beta1_GroupResources(a.(*audit.GroupResources), b.(*GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ObjectReference)(nil), (*audit.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ObjectReference_To_audit_ObjectReference(a.(*ObjectReference), b.(*audit.ObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.ObjectReference)(nil), (*ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_ObjectReference_To_v1beta1_ObjectReference(a.(*audit.ObjectReference), b.(*ObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Policy)(nil), (*audit.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Policy_To_audit_Policy(a.(*Policy), b.(*audit.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.Policy)(nil), (*Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Policy_To_v1beta1_Policy(a.(*audit.Policy), b.(*Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyList)(nil), (*audit.PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PolicyList_To_audit_PolicyList(a.(*PolicyList), b.(*audit.PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyList)(nil), (*PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyList_To_v1beta1_PolicyList(a.(*audit.PolicyList), b.(*PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyRule)(nil), (*audit.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) 
error { + return Convert_v1beta1_PolicyRule_To_audit_PolicyRule(a.(*PolicyRule), b.(*audit.PolicyRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyRule)(nil), (*PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyRule_To_v1beta1_PolicyRule(a.(*audit.PolicyRule), b.(*PolicyRule), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*audit.Event)(nil), (*Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Event_To_v1beta1_Event(a.(*audit.Event), b.(*Event), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Event)(nil), (*audit.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Event_To_audit_Event(a.(*Event), b.(*audit.Event), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + // WARNING: in.ObjectMeta requires manual conversion: does not exist in peer-type + out.Level = audit.Level(in.Level) + // WARNING: in.Timestamp requires manual conversion: does not exist in peer-type + out.AuditID = types.UID(in.AuditID) + out.Stage = audit.Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + out.ObjectRef = (*audit.ObjectReference)(unsafe.Pointer(in.ObjectRef)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +func autoConvert_audit_Event_To_v1beta1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + out.Level = Level(in.Level) + out.AuditID = types.UID(in.AuditID) + out.Stage = Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + out.ObjectRef = (*ObjectReference)(unsafe.Pointer(in.ObjectRef)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +func autoConvert_v1beta1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]audit.Event, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Event_To_audit_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_EventList_To_audit_EventList is an autogenerated conversion function. 
+func Convert_v1beta1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + return autoConvert_v1beta1_EventList_To_audit_EventList(in, out, s) +} + +func autoConvert_audit_EventList_To_v1beta1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := Convert_audit_Event_To_v1beta1_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_audit_EventList_To_v1beta1_EventList is an autogenerated conversion function. +func Convert_audit_EventList_To_v1beta1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + return autoConvert_audit_EventList_To_v1beta1_EventList(in, out, s) +} + +func autoConvert_v1beta1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_v1beta1_GroupResources_To_audit_GroupResources is an autogenerated conversion function. +func Convert_v1beta1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + return autoConvert_v1beta1_GroupResources_To_audit_GroupResources(in, out, s) +} + +func autoConvert_audit_GroupResources_To_v1beta1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_audit_GroupResources_To_v1beta1_GroupResources is an autogenerated conversion function. +func Convert_audit_GroupResources_To_v1beta1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + return autoConvert_audit_GroupResources_To_v1beta1_GroupResources(in, out, s) +} + +func autoConvert_v1beta1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIGroup = in.APIGroup + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +// Convert_v1beta1_ObjectReference_To_audit_ObjectReference is an autogenerated conversion function. +func Convert_v1beta1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + return autoConvert_v1beta1_ObjectReference_To_audit_ObjectReference(in, out, s) +} + +func autoConvert_audit_ObjectReference_To_v1beta1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIGroup = in.APIGroup + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +// Convert_audit_ObjectReference_To_v1beta1_ObjectReference is an autogenerated conversion function. 
+func Convert_audit_ObjectReference_To_v1beta1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + return autoConvert_audit_ObjectReference_To_v1beta1_ObjectReference(in, out, s) +} + +func autoConvert_v1beta1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_v1beta1_Policy_To_audit_Policy is an autogenerated conversion function. +func Convert_v1beta1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + return autoConvert_v1beta1_Policy_To_audit_Policy(in, out, s) +} + +func autoConvert_audit_Policy_To_v1beta1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_audit_Policy_To_v1beta1_Policy is an autogenerated conversion function. +func Convert_audit_Policy_To_v1beta1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + return autoConvert_audit_Policy_To_v1beta1_Policy(in, out, s) +} + +func autoConvert_v1beta1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]audit.Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_PolicyList_To_audit_PolicyList is an autogenerated conversion function. +func Convert_v1beta1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyList_To_audit_PolicyList(in, out, s) +} + +func autoConvert_audit_PolicyList_To_v1beta1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_audit_PolicyList_To_v1beta1_PolicyList is an autogenerated conversion function. +func Convert_audit_PolicyList_To_v1beta1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + return autoConvert_audit_PolicyList_To_v1beta1_PolicyList(in, out, s) +} + +func autoConvert_v1beta1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + out.Level = audit.Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]audit.GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_v1beta1_PolicyRule_To_audit_PolicyRule is an autogenerated conversion function. 
+func Convert_v1beta1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyRule_To_audit_PolicyRule(in, out, s) +} + +func autoConvert_audit_PolicyRule_To_v1beta1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Level = Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + return nil +} + +// Convert_audit_PolicyRule_To_v1beta1_PolicyRule is an autogenerated conversion function. +func Convert_audit_PolicyRule_To_v1beta1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_audit_PolicyRule_To_v1beta1_PolicyRule(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5adbd5a78c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,293 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + in.User.DeepCopyInto(&out.User) + if in.ImpersonatedUser != nil { + in, out := &in.ImpersonatedUser, &out.ImpersonatedUser + *out = new(v1.UserInfo) + (*in).DeepCopyInto(*out) + } + if in.SourceIPs != nil { + in, out := &in.SourceIPs, &out.SourceIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(ObjectReference) + **out = **in + } + if in.ResponseStatus != nil { + in, out := &in.ResponseStatus, &out.ResponseStatus + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.RequestObject != nil { + in, out := &in.RequestObject, &out.RequestObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + if in.ResponseObject != nil { + in, out := &in.ResponseObject, &out.ResponseObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResources) DeepCopyInto(out *GroupResources) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources. +func (in *GroupResources) DeepCopy() *GroupResources { + if in == nil { + return nil + } + out := new(GroupResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]GroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. 
+func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..73e63fc114 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go new file mode 100644 index 0000000000..0611c1ae5f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/audit" +) + +// ValidatePolicy validates the audit policy +func ValidatePolicy(policy *audit.Policy) field.ErrorList { + var allErrs field.ErrorList + allErrs = append(allErrs, validateOmitStages(policy.OmitStages, field.NewPath("omitStages"))...) + rulePath := field.NewPath("rules") + for i, rule := range policy.Rules { + allErrs = append(allErrs, validatePolicyRule(rule, rulePath.Index(i))...) + } + return allErrs +} + +func validatePolicyRule(rule audit.PolicyRule, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + allErrs = append(allErrs, validateLevel(rule.Level, fldPath.Child("level"))...) + allErrs = append(allErrs, validateNonResourceURLs(rule.NonResourceURLs, fldPath.Child("nonResourceURLs"))...) + allErrs = append(allErrs, validateResources(rule.Resources, fldPath.Child("resources"))...) + allErrs = append(allErrs, validateOmitStages(rule.OmitStages, fldPath.Child("omitStages"))...) 
+ + if len(rule.NonResourceURLs) > 0 { + if len(rule.Resources) > 0 || len(rule.Namespaces) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nonResourceURLs"), rule.NonResourceURLs, "rules cannot apply to both regular resources and non-resource URLs")) + } + } + + return allErrs +} + +var validLevels = []string{ + string(audit.LevelNone), + string(audit.LevelMetadata), + string(audit.LevelRequest), + string(audit.LevelRequestResponse), +} + +var validOmitStages = []string{ + string(audit.StageRequestReceived), + string(audit.StageResponseStarted), + string(audit.StageResponseComplete), + string(audit.StagePanic), +} + +func validateLevel(level audit.Level, fldPath *field.Path) field.ErrorList { + switch level { + case audit.LevelNone, audit.LevelMetadata, audit.LevelRequest, audit.LevelRequestResponse: + return nil + case "": + return field.ErrorList{field.Required(fldPath, "")} + default: + return field.ErrorList{field.NotSupported(fldPath, level, validLevels)} + } +} + +func validateNonResourceURLs(urls []string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, url := range urls { + if url == "*" { + continue + } + + if !strings.HasPrefix(url, "/") { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), url, "non-resource URL rules must begin with a '/' character")) + } + + if url != "" && strings.ContainsRune(url[:len(url)-1], '*') { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), url, "non-resource URL wildcards '*' must be the final character of the rule")) + } + } + return allErrs +} + +func validateResources(groupResources []audit.GroupResources, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for _, groupResource := range groupResources { + // The empty string represents the core API group. + if len(groupResource.Group) != 0 { + // Group names must be lower case and be valid DNS subdomains. + // reference: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md + // an error is returned for group name like rbac.authorization.k8s.io/v1beta1 + // rbac.authorization.k8s.io is the valid one + if msgs := validation.NameIsDNSSubdomain(groupResource.Group, false); len(msgs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), groupResource.Group, strings.Join(msgs, ","))) + } + } + + if len(groupResource.ResourceNames) > 0 && len(groupResource.Resources) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceNames"), groupResource.ResourceNames, "using resourceNames requires at least one resource")) + } + } + return allErrs +} + +func validateOmitStages(omitStages []audit.Stage, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, stage := range omitStages { + valid := false + for _, validOmitStage := range validOmitStages { + if string(stage) == validOmitStage { + valid = true + break + } + } + if !valid { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), string(stage), "allowed stages are "+strings.Join(validOmitStages, ","))) + } + } + return allErrs +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a210f631b2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go @@ -0,0 +1,291 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package audit + +import ( + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.User.DeepCopyInto(&out.User) + if in.ImpersonatedUser != nil { + in, out := &in.ImpersonatedUser, &out.ImpersonatedUser + *out = new(v1.UserInfo) + (*in).DeepCopyInto(*out) + } + if in.SourceIPs != nil { + in, out := &in.SourceIPs, &out.SourceIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(ObjectReference) + **out = **in + } + if in.ResponseStatus != nil { + in, out := &in.ResponseStatus, &out.ResponseStatus + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.RequestObject != nil { + in, out := &in.RequestObject, &out.RequestObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + if in.ResponseObject != nil { + in, out := &in.ResponseObject, &out.ResponseObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResources) DeepCopyInto(out *GroupResources) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources. +func (in *GroupResources) DeepCopy() *GroupResources { + if in == nil { + return nil + } + out := new(GroupResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]GroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/doc.go b/vendor/k8s.io/apiserver/pkg/apis/config/doc.go new file mode 100644 index 0000000000..338d4cebfa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package config // import "k8s.io/apiserver/pkg/apis/config" diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/register.go b/vendor/k8s.io/apiserver/pkg/apis/config/register.go new file mode 100644 index 0000000000..6a0aae8e56 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds this group to a scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package. +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects. 
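The generated PolicyRule deep-copy functions above exist because a plain struct assignment shares the backing arrays of slice fields such as Users, while DeepCopy allocates fresh ones. A small illustrative sketch (the example program and user name are assumptions, only the audit package path comes from this patch):

package main

import (
    "fmt"

    "k8s.io/apiserver/pkg/apis/audit"
)

func main() {
    orig := audit.PolicyRule{Users: []string{"system:kube-proxy"}}

    shallow := orig         // struct copy: Users still points at the same backing array
    deep := orig.DeepCopy() // generated copy: Users gets its own backing array

    shallow.Users[0] = "changed"
    fmt.Println(orig.Users[0]) // "changed": mutated through the shallow copy
    fmt.Println(deep.Users[0]) // "system:kube-proxy": unaffected
}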
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &EncryptionConfiguration{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/types.go new file mode 100644 index 0000000000..5dddc97f96 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EncryptionConfiguration stores the complete configuration for encryption providers. +type EncryptionConfiguration struct { + metav1.TypeMeta + // resources is a list containing resources, and their corresponding encryption providers. + Resources []ResourceConfiguration +} + +// ResourceConfiguration stores per resource configuration. +type ResourceConfiguration struct { + // resources is a list of kubernetes resources which have to be encrypted. + Resources []string + // providers is a list of transformers to be used for reading and writing the resources to disk. + // eg: aesgcm, aescbc, secretbox, identity. + Providers []ProviderConfiguration +} + +// ProviderConfiguration stores the provided configuration for an encryption provider. +type ProviderConfiguration struct { + // aesgcm is the configuration for the AES-GCM transformer. + AESGCM *AESConfiguration + // aescbc is the configuration for the AES-CBC transformer. + AESCBC *AESConfiguration + // secretbox is the configuration for the Secretbox based transformer. + Secretbox *SecretboxConfiguration + // identity is the (empty) configuration for the identity transformer. + Identity *IdentityConfiguration + // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer. + KMS *KMSConfiguration +} + +// AESConfiguration contains the API configuration for an AES transformer. +type AESConfiguration struct { + // keys is a list of keys to be used for creating the AES transformer. + // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM. + Keys []Key +} + +// SecretboxConfiguration contains the API configuration for an Secretbox transformer. +type SecretboxConfiguration struct { + // keys is a list of keys to be used for creating the Secretbox transformer. + // Each key has to be 32 bytes long. 
+ Keys []Key +} + +// Key contains name and secret of the provided key for a transformer. +type Key struct { + // name is the name of the key to be used while storing data to disk. + Name string + // secret is the actual key, encoded in base64. + Secret string +} + +// String implements Stringer interface in a log safe way. +func (k Key) String() string { + return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name) +} + +// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration. +type IdentityConfiguration struct{} + +// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer. +type KMSConfiguration struct { + // name is the name of the KMS plugin to be used. + Name string + // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000. + // Set to a negative value to disable caching. + // +optional + CacheSize *int32 + // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock". + Endpoint string + // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds. + // +optional + Timeout *metav1.Duration +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go new file mode 100644 index 0000000000..2d529651a9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + defaultTimeout = &metav1.Duration{Duration: 3 * time.Second} + defaultCacheSize int32 = 1000 +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_KMSConfiguration applies defaults to KMSConfiguration. +func SetDefaults_KMSConfiguration(obj *KMSConfiguration) { + if obj.Timeout == nil { + obj.Timeout = defaultTimeout + } + + if obj.CacheSize == nil { + obj.CacheSize = &defaultCacheSize + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go new file mode 100644 index 0000000000..b1a18ccab5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/config +// +k8s:deepcopy-gen=package +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go new file mode 100644 index 0000000000..32b5634c44 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package. +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // AddToScheme adds this group to a scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addDefaultingFuncs) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EncryptionConfiguration{}, + ) + // also register into the v1 group as EncryptionConfig (due to a docs bug) + scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "EncryptionConfig"}, &EncryptionConfiguration{}) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go new file mode 100644 index 0000000000..d7d68d2584 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EncryptionConfiguration stores the complete configuration for encryption providers. 
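The register.go above adds the same Go type under two GroupVersionKinds: the regular apiserver.config.k8s.io/v1 EncryptionConfiguration and the historical core-v1 EncryptionConfig alias. A hedged sketch of what that looks like through a runtime.Scheme (the import alias and the demo program are illustrative, not part of the patch):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
    scheme := runtime.NewScheme()
    if err := apiserverconfigv1.AddToScheme(scheme); err != nil {
        panic(err)
    }

    // Both kinds are backed by *v1.EncryptionConfiguration.
    for _, gvk := range []schema.GroupVersionKind{
        {Group: "apiserver.config.k8s.io", Version: "v1", Kind: "EncryptionConfiguration"},
        {Group: "", Version: "v1", Kind: "EncryptionConfig"}, // legacy alias registered above
    } {
        obj, err := scheme.New(gvk)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s -> %T\n", gvk, obj)
    }
}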
+type EncryptionConfiguration struct { + metav1.TypeMeta + // resources is a list containing resources, and their corresponding encryption providers. + Resources []ResourceConfiguration `json:"resources"` +} + +// ResourceConfiguration stores per resource configuration. +type ResourceConfiguration struct { + // resources is a list of kubernetes resources which have to be encrypted. + Resources []string `json:"resources"` + // providers is a list of transformers to be used for reading and writing the resources to disk. + // eg: aesgcm, aescbc, secretbox, identity. + Providers []ProviderConfiguration `json:"providers"` +} + +// ProviderConfiguration stores the provided configuration for an encryption provider. +type ProviderConfiguration struct { + // aesgcm is the configuration for the AES-GCM transformer. + AESGCM *AESConfiguration `json:"aesgcm,omitempty"` + // aescbc is the configuration for the AES-CBC transformer. + AESCBC *AESConfiguration `json:"aescbc,omitempty"` + // secretbox is the configuration for the Secretbox based transformer. + Secretbox *SecretboxConfiguration `json:"secretbox,omitempty"` + // identity is the (empty) configuration for the identity transformer. + Identity *IdentityConfiguration `json:"identity,omitempty"` + // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer. + KMS *KMSConfiguration `json:"kms,omitempty"` +} + +// AESConfiguration contains the API configuration for an AES transformer. +type AESConfiguration struct { + // keys is a list of keys to be used for creating the AES transformer. + // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM. + Keys []Key `json:"keys"` +} + +// SecretboxConfiguration contains the API configuration for an Secretbox transformer. +type SecretboxConfiguration struct { + // keys is a list of keys to be used for creating the Secretbox transformer. + // Each key has to be 32 bytes long. + Keys []Key `json:"keys"` +} + +// Key contains name and secret of the provided key for a transformer. +type Key struct { + // name is the name of the key to be used while storing data to disk. + Name string `json:"name"` + // secret is the actual key, encoded in base64. + Secret string `json:"secret"` +} + +// String implements Stringer interface in a log safe way. +func (k Key) String() string { + return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name) +} + +// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration. +type IdentityConfiguration struct{} + +// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer. +type KMSConfiguration struct { + // name is the name of the KMS plugin to be used. + Name string `json:"name"` + // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000. + // Set to a negative value to disable caching. + // +optional + CacheSize *int32 `json:"cachesize,omitempty"` + // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock". + Endpoint string `json:"endpoint"` + // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds. 
+ // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..c7de6539d8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go @@ -0,0 +1,296 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + config "k8s.io/apiserver/pkg/apis/config" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AESConfiguration)(nil), (*config.AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AESConfiguration_To_config_AESConfiguration(a.(*AESConfiguration), b.(*config.AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.AESConfiguration)(nil), (*AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_AESConfiguration_To_v1_AESConfiguration(a.(*config.AESConfiguration), b.(*AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*config.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*config.EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.EncryptionConfiguration)(nil), (*EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(a.(*config.EncryptionConfiguration), b.(*EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*IdentityConfiguration)(nil), (*config.IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(a.(*IdentityConfiguration), b.(*config.IdentityConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.IdentityConfiguration)(nil), (*IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(a.(*config.IdentityConfiguration), b.(*IdentityConfiguration), scope) + }); err != nil { + 
return err + } + if err := s.AddGeneratedConversionFunc((*KMSConfiguration)(nil), (*config.KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_KMSConfiguration_To_config_KMSConfiguration(a.(*KMSConfiguration), b.(*config.KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KMSConfiguration)(nil), (*KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KMSConfiguration_To_v1_KMSConfiguration(a.(*config.KMSConfiguration), b.(*KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Key)(nil), (*config.Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Key_To_config_Key(a.(*Key), b.(*config.Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Key)(nil), (*Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Key_To_v1_Key(a.(*config.Key), b.(*Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProviderConfiguration)(nil), (*config.ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(a.(*ProviderConfiguration), b.(*config.ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ProviderConfiguration)(nil), (*ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(a.(*config.ProviderConfiguration), b.(*ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ResourceConfiguration)(nil), (*config.ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(a.(*ResourceConfiguration), b.(*config.ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ResourceConfiguration)(nil), (*ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(a.(*config.ResourceConfiguration), b.(*ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SecretboxConfiguration)(nil), (*config.SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(a.(*SecretboxConfiguration), b.(*config.SecretboxConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SecretboxConfiguration)(nil), (*SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(a.(*config.SecretboxConfiguration), b.(*SecretboxConfiguration), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_AESConfiguration_To_config_AESConfiguration is an autogenerated conversion function. 
+func Convert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error { + return autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in, out, s) +} + +func autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_config_AESConfiguration_To_v1_AESConfiguration is an autogenerated conversion function. +func Convert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + return autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in, out, s) +} + +func autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]config.ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration is an autogenerated conversion function. +func Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in, out, s) +} + +func autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration is an autogenerated conversion function. +func Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in, out, s) +} + +func autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration is an autogenerated conversion function. +func Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error { + return autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in, out, s) +} + +func autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration is an autogenerated conversion function. 
+func Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in, out, s) +} + +func autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_v1_KMSConfiguration_To_config_KMSConfiguration is an autogenerated conversion function. +func Convert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error { + return autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in, out, s) +} + +func autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_config_KMSConfiguration_To_v1_KMSConfiguration is an autogenerated conversion function. +func Convert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + return autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in, out, s) +} + +func autoConvert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_v1_Key_To_config_Key is an autogenerated conversion function. +func Convert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error { + return autoConvert_v1_Key_To_config_Key(in, out, s) +} + +func autoConvert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_config_Key_To_v1_Key is an autogenerated conversion function. +func Convert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error { + return autoConvert_config_Key_To_v1_Key(in, out, s) +} + +func autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*config.AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*config.AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*config.SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*config.IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*config.KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration is an autogenerated conversion function. 
+func Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error { + return autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in, out, s) +} + +func autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration is an autogenerated conversion function. +func Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + return autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in, out, s) +} + +func autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]config.ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration is an autogenerated conversion function. +func Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error { + return autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in, out, s) +} + +func autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration is an autogenerated conversion function. +func Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + return autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in, out, s) +} + +func autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration is an autogenerated conversion function. +func Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in, out, s) +} + +func autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration is an autogenerated conversion function. 
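The generated conversion functions above are typically invoked through a scheme rather than called directly. A minimal sketch, assuming illustrative import aliases and sample field values, of converting a versioned EncryptionConfiguration into the internal config type:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    apiserverconfig "k8s.io/apiserver/pkg/apis/config"
    apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
    scheme := runtime.NewScheme()
    if err := apiserverconfig.AddToScheme(scheme); err != nil {
        panic(err)
    }
    if err := apiserverconfigv1.AddToScheme(scheme); err != nil {
        panic(err)
    }

    in := &apiserverconfigv1.EncryptionConfiguration{
        Resources: []apiserverconfigv1.ResourceConfiguration{{
            Resources: []string{"secrets"},
            Providers: []apiserverconfigv1.ProviderConfiguration{{Identity: &apiserverconfigv1.IdentityConfiguration{}}},
        }},
    }

    out := &apiserverconfig.EncryptionConfiguration{}
    // Uses the Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration
    // function registered by RegisterConversions.
    if err := scheme.Convert(in, out, nil); err != nil {
        panic(err)
    }
    fmt.Println(out.Resources[0].Resources) // [secrets]
}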
+func Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..dcb4e85529 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. +func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. +func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. 
+func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..1c8db8d04f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go @@ -0,0 +1,45 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) }) + return nil +} + +func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) { + for i := range in.Resources { + a := &in.Resources[i] + for j := range a.Providers { + b := &a.Providers[j] + if b.KMS != nil { + SetDefaults_KMSConfiguration(b.KMS) + } + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go new file mode 100644 index 0000000000..966ff1f0d1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation validates EncryptionConfiguration. 
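SetObjectDefaults_EncryptionConfiguration above walks every resource/provider pair and applies the KMS defaults from defaults.go (3s timeout, cache size 1000) wherever those fields are nil. A small sketch of the observable effect; the plugin name and socket path are illustrative:

package main

import (
    "fmt"

    apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
    cfg := &apiserverconfigv1.EncryptionConfiguration{
        Resources: []apiserverconfigv1.ResourceConfiguration{{
            Resources: []string{"secrets"},
            Providers: []apiserverconfigv1.ProviderConfiguration{{
                // Timeout and CacheSize deliberately left nil so the defaulter fills them in.
                KMS: &apiserverconfigv1.KMSConfiguration{Name: "example-kms", Endpoint: "unix:///tmp/kms.sock"},
            }},
        }},
    }

    apiserverconfigv1.SetObjectDefaults_EncryptionConfiguration(cfg)

    kms := cfg.Resources[0].Providers[0].KMS
    fmt.Println(kms.Timeout.Duration, *kms.CacheSize) // 3s 1000
}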
+package validation + +import ( + "encoding/base64" + "fmt" + "net/url" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/config" +) + +const ( + moreThanOneElementErr = "more than one provider specified in a single element, should split into different list elements" + keyLenErrFmt = "secret is not of the expected length, got %d, expected one of %v" + unsupportedSchemeErrFmt = "unsupported scheme %q for KMS provider, only unix is supported" + atLeastOneRequiredErrFmt = "at least one %s is required" + invalidURLErrFmt = "invalid endpoint for kms provider, error: parse %s: net/url: invalid control character in URL" + mandatoryFieldErrFmt = "%s is a mandatory field for a %s" + base64EncodingErr = "secrets must be base64 encoded" + zeroOrNegativeErrFmt = "%s should be a positive value" + nonZeroErrFmt = "%s should be a positive value, or negative to disable" + encryptionConfigNilErr = "EncryptionConfiguration can't be nil" +) + +var ( + aesKeySizes = []int{16, 24, 32} + // See https://golang.org/pkg/crypto/aes/#NewCipher for details on supported key sizes for AES. + secretBoxKeySizes = []int{32} + // See https://godoc.org/golang.org/x/crypto/nacl/secretbox#Open for details on the supported key sizes for Secretbox. + root = field.NewPath("resources") +) + +// ValidateEncryptionConfiguration validates a v1.EncryptionConfiguration. +func ValidateEncryptionConfiguration(c *config.EncryptionConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + + if c == nil { + allErrs = append(allErrs, field.Required(root, "EncryptionConfiguration can't be nil")) + return allErrs + } + + if len(c.Resources) == 0 { + allErrs = append(allErrs, field.Required(root, fmt.Sprintf(atLeastOneRequiredErrFmt, root))) + return allErrs + } + + for i, conf := range c.Resources { + r := root.Index(i).Child("resources") + p := root.Index(i).Child("providers") + + if len(conf.Resources) == 0 { + allErrs = append(allErrs, field.Required(r, fmt.Sprintf(atLeastOneRequiredErrFmt, r))) + } + + if len(conf.Providers) == 0 { + allErrs = append(allErrs, field.Required(p, fmt.Sprintf(atLeastOneRequiredErrFmt, p))) + } + + for j, provider := range conf.Providers { + path := p.Index(j) + allErrs = append(allErrs, validateSingleProvider(provider, path)...) + + switch { + case provider.KMS != nil: + allErrs = append(allErrs, validateKMSConfiguration(provider.KMS, path.Child("kms"))...) + case provider.AESGCM != nil: + allErrs = append(allErrs, validateKeys(provider.AESGCM.Keys, path.Child("aesgcm").Child("keys"), aesKeySizes)...) + case provider.AESCBC != nil: + allErrs = append(allErrs, validateKeys(provider.AESCBC.Keys, path.Child("aescbc").Child("keys"), aesKeySizes)...) + case provider.Secretbox != nil: + allErrs = append(allErrs, validateKeys(provider.Secretbox.Keys, path.Child("secretbox").Child("keys"), secretBoxKeySizes)...) 
+ } + } + } + + return allErrs +} + +func validateSingleProvider(provider config.ProviderConfiguration, filedPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + found := 0 + + if provider.KMS != nil { + found++ + } + if provider.AESGCM != nil { + found++ + } + if provider.AESCBC != nil { + found++ + } + if provider.Secretbox != nil { + found++ + } + if provider.Identity != nil { + found++ + } + + if found == 0 { + return append(allErrs, field.Invalid(filedPath, provider, "provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity")) + } + + if found > 1 { + return append(allErrs, field.Invalid(filedPath, provider, moreThanOneElementErr)) + } + + return allErrs +} + +func validateKeys(keys []config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList { + allErrs := field.ErrorList{} + + if len(keys) == 0 { + allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, "keys"))) + return allErrs + } + + for i, key := range keys { + allErrs = append(allErrs, validateKey(key, fieldPath.Index(i), expectedLen)...) + } + + return allErrs +} + +func validateKey(key config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList { + allErrs := field.ErrorList{} + + if key.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "key"))) + } + + if key.Secret == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("secret"), fmt.Sprintf(mandatoryFieldErrFmt, "secret", "key"))) + return allErrs + } + + secret, err := base64.StdEncoding.DecodeString(key.Secret) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", base64EncodingErr)) + return allErrs + } + + lenMatched := false + for _, l := range expectedLen { + if len(secret) == l { + lenMatched = true + break + } + } + + if !lenMatched { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", fmt.Sprintf(keyLenErrFmt, len(secret), expectedLen))) + } + + return allErrs +} + +func validateKMSConfiguration(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "provider"))) + } + allErrs = append(allErrs, validateKMSTimeout(c, fieldPath.Child("timeout"))...) + allErrs = append(allErrs, validateKMSEndpoint(c, fieldPath.Child("endpoint"))...) + allErrs = append(allErrs, validateKMSCacheSize(c, fieldPath.Child("cachesize"))...) 
+ return allErrs +} + +func validateKMSCacheSize(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if *c.CacheSize == 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, *c.CacheSize, fmt.Sprintf(nonZeroErrFmt, "cachesize"))) + } + + return allErrs +} + +func validateKMSTimeout(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.Timeout.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, c.Timeout, fmt.Sprintf(zeroOrNegativeErrFmt, "timeout"))) + } + + return allErrs +} + +func validateKMSEndpoint(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(c.Endpoint) == 0 { + return append(allErrs, field.Invalid(fieldPath, "", fmt.Sprintf(mandatoryFieldErrFmt, "endpoint", "kms"))) + } + + u, err := url.Parse(c.Endpoint) + if err != nil { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf("invalid endpoint for kms provider, error: %v", err))) + } + + if u.Scheme != "unix" { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf(unsupportedSchemeErrFmt, u.Scheme))) + } + + return allErrs +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..dd66315ee7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. 
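ValidateEncryptionConfiguration above can be exercised directly against the internal types. A minimal sketch of one passing and one failing configuration; the key name is illustrative, and the 32-byte key is generated inline so it satisfies the aescbc key-size check:

package main

import (
    "encoding/base64"
    "fmt"

    apiserverconfig "k8s.io/apiserver/pkg/apis/config"
    "k8s.io/apiserver/pkg/apis/config/validation"
)

func main() {
    // A 32-byte key, base64-encoded as the secret field requires.
    secret := base64.StdEncoding.EncodeToString(make([]byte, 32))

    valid := &apiserverconfig.EncryptionConfiguration{
        Resources: []apiserverconfig.ResourceConfiguration{{
            Resources: []string{"secrets"},
            Providers: []apiserverconfig.ProviderConfiguration{{
                AESCBC: &apiserverconfig.AESConfiguration{Keys: []apiserverconfig.Key{{Name: "key1", Secret: secret}}},
            }},
        }},
    }
    fmt.Println(len(validation.ValidateEncryptionConfiguration(valid))) // 0

    // Missing resources and providers: two Required errors rooted at resources[0].
    invalid := &apiserverconfig.EncryptionConfiguration{
        Resources: []apiserverconfig.ResourceConfiguration{{}},
    }
    fmt.Println(validation.ValidateEncryptionConfiguration(invalid))
}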
+func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. +func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. +func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. +func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go new file mode 100644 index 0000000000..f3e9a1a7bd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go @@ -0,0 +1,475 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bootstrap + +import ( + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" +) + +// The objects that define an apiserver's initial behavior. The +// registered defaulting procedures make no changes to these +// particular objects (this is verified in the unit tests of the +// internalbootstrap package; it can not be verified in this package +// because that would require importing k8s.io/kubernetes). +var ( + MandatoryPriorityLevelConfigurations = []*flowcontrol.PriorityLevelConfiguration{ + MandatoryPriorityLevelConfigurationCatchAll, + MandatoryPriorityLevelConfigurationExempt, + } + MandatoryFlowSchemas = []*flowcontrol.FlowSchema{ + MandatoryFlowSchemaExempt, + MandatoryFlowSchemaCatchAll, + } +) + +// The objects that define the current suggested additional configuration +var ( + SuggestedPriorityLevelConfigurations = []*flowcontrol.PriorityLevelConfiguration{ + // "system" priority-level is for the system components that affects self-maintenance of the + // cluster and the availability of those running pods in the cluster, including kubelet and + // kube-proxy. 
+ SuggestedPriorityLevelConfigurationSystem,
+ // "leader-election" is dedicated to the controllers' leader election, which mainly affects the
+ // availability of any controller running in the cluster.
+ SuggestedPriorityLevelConfigurationLeaderElection,
+ // "workload-high" is used by higher-priority workloads whose failure won't directly
+ // impact the pods already running in the cluster; this includes kube-scheduler and well-known
+ // built-in workloads such as "deployments", "replicasets", and other low-level custom workloads
+ // that are important for the cluster.
+ SuggestedPriorityLevelConfigurationWorkloadHigh,
+ // "workload-low" is used by lower-priority workloads whose availability has only a
+ // minor impact on the cluster.
+ SuggestedPriorityLevelConfigurationWorkloadLow,
+ // "global-default" serves the remaining traffic not handled by the other suggested flow-schemas above.
+ SuggestedPriorityLevelConfigurationGlobalDefault,
+ }
+ SuggestedFlowSchemas = []*flowcontrol.FlowSchema{
+ SuggestedFlowSchemaSystemNodes, // references "system" priority-level
+ SuggestedFlowSchemaSystemLeaderElection, // references "leader-election" priority-level
+ SuggestedFlowSchemaWorkloadLeaderElection, // references "leader-election" priority-level
+ SuggestedFlowSchemaKubeControllerManager, // references "workload-high" priority-level
+ SuggestedFlowSchemaKubeScheduler, // references "workload-high" priority-level
+ SuggestedFlowSchemaKubeSystemServiceAccounts, // references "workload-high" priority-level
+ SuggestedFlowSchemaServiceAccounts, // references "workload-low" priority-level
+ SuggestedFlowSchemaGlobalDefault, // references "global-default" priority-level
+ }
+)
+
+// Mandatory PriorityLevelConfiguration objects
+var (
+ MandatoryPriorityLevelConfigurationExempt = newPriorityLevelConfiguration(
+ flowcontrol.PriorityLevelConfigurationNameExempt,
+ flowcontrol.PriorityLevelConfigurationSpec{
+ Type: flowcontrol.PriorityLevelEnablementExempt,
+ },
+ )
+ MandatoryPriorityLevelConfigurationCatchAll = newPriorityLevelConfiguration(
+ "catch-all",
+ flowcontrol.PriorityLevelConfigurationSpec{
+ Type: flowcontrol.PriorityLevelEnablementLimited,
+ Limited: &flowcontrol.LimitedPriorityLevelConfiguration{
+ AssuredConcurrencyShares: 5,
+ LimitResponse: flowcontrol.LimitResponse{
+ Type: flowcontrol.LimitResponseTypeReject,
+ },
+ },
+ })
+)
+
+// Mandatory FlowSchema objects
+var (
+ // "exempt" priority-level is used for preventing priority inversion and ensuring that sysadmin
+ // requests are always possible.
+ MandatoryFlowSchemaExempt = newFlowSchema(
+ "exempt",
+ flowcontrol.PriorityLevelConfigurationNameExempt,
+ 1, // matchingPrecedence
+ "", // distinguisherMethodType
+ flowcontrol.PolicyRulesWithSubjects{
+ Subjects: groups(user.SystemPrivilegedGroup),
+ ResourceRules: []flowcontrol.ResourcePolicyRule{
+ resourceRule(
+ []string{flowcontrol.VerbAll},
+ []string{flowcontrol.APIGroupAll},
+ []string{flowcontrol.ResourceAll},
+ []string{flowcontrol.NamespaceEvery},
+ true,
+ ),
+ },
+ NonResourceRules: []flowcontrol.NonResourcePolicyRule{
+ nonResourceRule(
+ []string{flowcontrol.VerbAll},
+ []string{flowcontrol.NonResourceAll},
+ ),
+ },
+ },
+ )
+ // The "catch-all" priority-level only gets a minimal positive share of concurrency and, ideally, is never
+ // reached unless the suggested "global-default" has been intentionally deleted.
+ MandatoryFlowSchemaCatchAll = newFlowSchema( + "catch-all", + "catch-all", + 10000, // matchingPrecedence + flowcontrol.FlowDistinguisherMethodByUserType, // distinguisherMethodType + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(user.AllUnauthenticated, user.AllAuthenticated), + ResourceRules: []flowcontrol.ResourcePolicyRule{ + resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true, + ), + }, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}, + ), + }, + }, + ) +) + +// Suggested PriorityLevelConfiguration objects +var ( + // system priority-level + SuggestedPriorityLevelConfigurationSystem = newPriorityLevelConfiguration( + "system", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 30, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 64, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) + // leader-election priority-level + SuggestedPriorityLevelConfigurationLeaderElection = newPriorityLevelConfiguration( + "leader-election", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 10, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 16, + HandSize: 4, + QueueLengthLimit: 50, + }, + }, + }, + }) + // workload-high priority-level + SuggestedPriorityLevelConfigurationWorkloadHigh = newPriorityLevelConfiguration( + "workload-high", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 40, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 128, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) + // workload-low priority-level + SuggestedPriorityLevelConfigurationWorkloadLow = newPriorityLevelConfiguration( + "workload-low", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 100, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 128, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) + // global-default priority-level + SuggestedPriorityLevelConfigurationGlobalDefault = newPriorityLevelConfiguration( + "global-default", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 20, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 128, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) +) + +// Suggested FlowSchema objects +var ( + SuggestedFlowSchemaSystemNodes = newFlowSchema( + "system-nodes", "system", 500, + 
flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(user.NodesGroup), // the nodes group + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaSystemLeaderElection = newFlowSchema( + "system-leader-election", "leader-election", 100, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: append( + users(user.KubeControllerManager, user.KubeScheduler), + kubeSystemServiceAccount(flowcontrol.NameAll)...), + ResourceRules: []flowcontrol.ResourcePolicyRule{ + resourceRule( + []string{"get", "create", "update"}, + []string{corev1.GroupName}, + []string{"endpoints", "configmaps"}, + []string{"kube-system"}, + false), + resourceRule( + []string{"get", "create", "update"}, + []string{coordinationv1.GroupName}, + []string{"leases"}, + []string{flowcontrol.NamespaceEvery}, + false), + }, + }, + ) + SuggestedFlowSchemaWorkloadLeaderElection = newFlowSchema( + "workload-leader-election", "leader-election", 200, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: kubeSystemServiceAccount(flowcontrol.NameAll), + ResourceRules: []flowcontrol.ResourcePolicyRule{ + resourceRule( + []string{"get", "create", "update"}, + []string{corev1.GroupName}, + []string{"endpoints", "configmaps"}, + []string{flowcontrol.NamespaceEvery}, + false), + resourceRule( + []string{"get", "create", "update"}, + []string{coordinationv1.GroupName}, + []string{"leases"}, + []string{flowcontrol.NamespaceEvery}, + false), + }, + }, + ) + SuggestedFlowSchemaKubeControllerManager = newFlowSchema( + "kube-controller-manager", "workload-high", 800, + flowcontrol.FlowDistinguisherMethodByNamespaceType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: users(user.KubeControllerManager), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaKubeScheduler = newFlowSchema( + "kube-scheduler", "workload-high", 800, + flowcontrol.FlowDistinguisherMethodByNamespaceType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: users(user.KubeScheduler), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaKubeSystemServiceAccounts = newFlowSchema( + "kube-system-service-accounts", "workload-high", 900, + flowcontrol.FlowDistinguisherMethodByNamespaceType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: kubeSystemServiceAccount(flowcontrol.NameAll), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + 
[]string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaServiceAccounts = newFlowSchema( + "service-accounts", "workload-low", 9000, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(serviceaccount.AllServiceAccountsGroup), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaGlobalDefault = newFlowSchema( + "global-default", "global-default", 9900, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(user.AllUnauthenticated, user.AllAuthenticated), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) +) + +func newPriorityLevelConfiguration(name string, spec flowcontrol.PriorityLevelConfigurationSpec) *flowcontrol.PriorityLevelConfiguration { + return &flowcontrol.PriorityLevelConfiguration{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: spec} +} + +func newFlowSchema(name, plName string, matchingPrecedence int32, dmType flowcontrol.FlowDistinguisherMethodType, rules ...flowcontrol.PolicyRulesWithSubjects) *flowcontrol.FlowSchema { + var dm *flowcontrol.FlowDistinguisherMethod + if dmType != "" { + dm = &flowcontrol.FlowDistinguisherMethod{Type: dmType} + } + return &flowcontrol.FlowSchema{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: flowcontrol.FlowSchemaSpec{ + PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{ + Name: plName, + }, + MatchingPrecedence: matchingPrecedence, + DistinguisherMethod: dm, + Rules: rules}, + } + +} + +func groups(names ...string) []flowcontrol.Subject { + ans := make([]flowcontrol.Subject, len(names)) + for idx, name := range names { + ans[idx] = flowcontrol.Subject{ + Kind: flowcontrol.SubjectKindGroup, + Group: &flowcontrol.GroupSubject{ + Name: name, + }, + } + } + return ans +} + +func users(names ...string) []flowcontrol.Subject { + ans := make([]flowcontrol.Subject, len(names)) + for idx, name := range names { + ans[idx] = flowcontrol.Subject{ + Kind: flowcontrol.SubjectKindUser, + User: &flowcontrol.UserSubject{ + Name: name, + }, + } + } + return ans +} + +func kubeSystemServiceAccount(names ...string) []flowcontrol.Subject { + subjects := []flowcontrol.Subject{} + for _, name := range names { + subjects = append(subjects, flowcontrol.Subject{ + Kind: flowcontrol.SubjectKindServiceAccount, + ServiceAccount: &flowcontrol.ServiceAccountSubject{ + Name: name, + Namespace: metav1.NamespaceSystem, + }, + }) + } + return subjects +} + +func resourceRule(verbs []string, groups []string, resources []string, namespaces []string, clusterScoped bool) flowcontrol.ResourcePolicyRule { + return 
flowcontrol.ResourcePolicyRule{ + Verbs: verbs, + APIGroups: groups, + Resources: resources, + Namespaces: namespaces, + ClusterScope: clusterScoped, + } +} + +func nonResourceRule(verbs []string, nonResourceURLs []string) flowcontrol.NonResourcePolicyRule { + return flowcontrol.NonResourcePolicyRule{Verbs: verbs, NonResourceURLs: nonResourceURLs} +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/OWNERS b/vendor/k8s.io/apiserver/pkg/audit/OWNERS new file mode 100644 index 0000000000..c3486ad5de --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-audit-approvers +reviewers: +- sig-auth-audit-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/audit/context.go b/vendor/k8s.io/apiserver/pkg/audit/context.go new file mode 100644 index 0000000000..3d616bbd4c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/context.go @@ -0,0 +1,84 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "context" + + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// The key type is unexported to prevent collisions +type key int + +const ( + // auditAnnotationsKey is the context key for the audit annotations. + auditAnnotationsKey key = iota +) + +// annotations = *[]annotation instead of a map to preserve order of insertions +type annotation struct { + key, value string +} + +// WithAuditAnnotations returns a new context that can store audit annotations +// via the AddAuditAnnotation function. This function is meant to be called from +// an early request handler to allow all later layers to set audit annotations. +// This is required to support flows where handlers that come before WithAudit +// (such as WithAuthentication) wish to set audit annotations. +func WithAuditAnnotations(parent context.Context) context.Context { + // this should never really happen, but prevent double registration of this slice + if _, ok := parent.Value(auditAnnotationsKey).(*[]annotation); ok { + return parent + } + + var annotations []annotation // avoid allocations until we actually need it + return genericapirequest.WithValue(parent, auditAnnotationsKey, &annotations) +} + +// AddAuditAnnotation sets the audit annotation for the given key, value pair. +// It is safe to call at most parts of request flow that come after WithAuditAnnotations. +// The notable exception being that this function must not be called via a +// defer statement (i.e. after ServeHTTP) in a handler that runs before WithAudit +// as at that point the audit event has already been sent to the audit sink. +// Handlers that are unaware of their position in the overall request flow should +// prefer AddAuditAnnotation over LogAnnotation to avoid dropping annotations. 
+func AddAuditAnnotation(ctx context.Context, key, value string) { + // use the audit event directly if we have it + if ae := genericapirequest.AuditEventFrom(ctx); ae != nil { + LogAnnotation(ae, key, value) + return + } + + annotations, ok := ctx.Value(auditAnnotationsKey).(*[]annotation) + if !ok { + return // adding audit annotation is not supported at this call site + } + + *annotations = append(*annotations, annotation{key: key, value: value}) +} + +// This is private to prevent reads/write to the slice from outside of this package. +// The audit event should be directly read to get access to the annotations. +func auditAnnotationsFrom(ctx context.Context) []annotation { + annotations, ok := ctx.Value(auditAnnotationsKey).(*[]annotation) + if !ok { + return nil // adding audit annotation is not supported at this call site + } + + return *annotations +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/format.go b/vendor/k8s.io/apiserver/pkg/audit/format.go new file mode 100644 index 0000000000..bf805f5257 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/format.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "fmt" + "strconv" + "strings" + "time" + + auditinternal "k8s.io/apiserver/pkg/apis/audit" +) + +// EventString creates a 1-line text representation of an audit event, using a subset of the +// information in the event struct. +func EventString(ev *auditinternal.Event) string { + username := "" + groups := "" + if len(ev.User.Username) > 0 { + username = ev.User.Username + if len(ev.User.Groups) > 0 { + groups = auditStringSlice(ev.User.Groups) + } + } + asuser := "" + asgroups := "" + if ev.ImpersonatedUser != nil { + asuser = ev.ImpersonatedUser.Username + if ev.ImpersonatedUser.Groups != nil { + asgroups = auditStringSlice(ev.ImpersonatedUser.Groups) + } + } + + namespace := "" + if ev.ObjectRef != nil && len(ev.ObjectRef.Namespace) != 0 { + namespace = ev.ObjectRef.Namespace + } + + response := "" + if ev.ResponseStatus != nil { + response = strconv.Itoa(int(ev.ResponseStatus.Code)) + } + + ip := "" + if len(ev.SourceIPs) > 0 { + ip = ev.SourceIPs[0] + } + + return fmt.Sprintf("%s AUDIT: id=%q stage=%q ip=%q method=%q user=%q groups=%q as=%q asgroups=%q namespace=%q uri=%q response=\"%s\"", + ev.RequestReceivedTimestamp.Format(time.RFC3339Nano), ev.AuditID, ev.Stage, ip, ev.Verb, username, groups, asuser, asgroups, namespace, ev.RequestURI, response) +} + +func auditStringSlice(inList []string) string { + quotedElements := make([]string, len(inList)) + for i, in := range inList { + quotedElements[i] = fmt.Sprintf("%q", in) + } + return strings.Join(quotedElements, ",") +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/metrics.go b/vendor/k8s.io/apiserver/pkg/audit/metrics.go new file mode 100644 index 0000000000..96166e6545 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/metrics.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "fmt" + + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +const ( + subsystem = "apiserver_audit" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + eventCounter = metrics.NewCounter( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "event_total", + Help: "Counter of audit events generated and sent to the audit backend.", + StabilityLevel: metrics.ALPHA, + }) + errorCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "error_total", + Help: "Counter of audit events that failed to be audited properly. " + + "Plugin identifies the plugin affected by the error.", + StabilityLevel: metrics.ALPHA, + }, + []string{"plugin"}, + ) + levelCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "level_total", + Help: "Counter of policy levels for audit events (1 per request).", + StabilityLevel: metrics.ALPHA, + }, + []string{"level"}, + ) + + ApiserverAuditDroppedCounter = metrics.NewCounter( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "requests_rejected_total", + Help: "Counter of apiserver requests rejected due to an error " + + "in audit logging backend.", + StabilityLevel: metrics.ALPHA, + }, + ) +) + +func init() { + legacyregistry.MustRegister(eventCounter) + legacyregistry.MustRegister(errorCounter) + legacyregistry.MustRegister(levelCounter) + legacyregistry.MustRegister(ApiserverAuditDroppedCounter) +} + +// ObserveEvent updates the relevant prometheus metrics for the generated audit event. +func ObserveEvent() { + eventCounter.Inc() +} + +// ObservePolicyLevel updates the relevant prometheus metrics with the audit level for a request. +func ObservePolicyLevel(level auditinternal.Level) { + levelCounter.WithLabelValues(string(level)).Inc() +} + +// HandlePluginError handles an error that occurred in an audit plugin. This method should only be +// used if the error may have prevented the audit event from being properly recorded. The events are +// logged to the debug log. +func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Event) { + // Count the error. + errorCounter.WithLabelValues(plugin).Add(float64(len(impacted))) + + // Log the audit events to the debug log. 
+ msg := fmt.Sprintf("Error in audit plugin '%s' affecting %d audit events: %v\nImpacted events:\n", + plugin, len(impacted), err) + for _, ev := range impacted { + msg = msg + EventString(ev) + "\n" + } + klog.Error(msg) +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go new file mode 100644 index 0000000000..e9a901abf6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go @@ -0,0 +1,219 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "strings" + + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +const ( + // DefaultAuditLevel is the default level to audit at, if no policy rules are matched. + DefaultAuditLevel = audit.LevelNone +) + +// Checker exposes methods for checking the policy rules. +type Checker interface { + // Check the audit level for a request with the given authorizer attributes. + LevelAndStages(authorizer.Attributes) (audit.Level, []audit.Stage) +} + +// NewChecker creates a new policy checker. +func NewChecker(policy *audit.Policy) Checker { + for i, rule := range policy.Rules { + policy.Rules[i].OmitStages = unionStages(policy.OmitStages, rule.OmitStages) + } + return &policyChecker{*policy} +} + +func unionStages(stageLists ...[]audit.Stage) []audit.Stage { + m := make(map[audit.Stage]bool) + for _, sl := range stageLists { + for _, s := range sl { + m[s] = true + } + } + result := make([]audit.Stage, 0, len(m)) + for key := range m { + result = append(result, key) + } + return result +} + +// FakeChecker creates a checker that returns a constant level for all requests (for testing). +func FakeChecker(level audit.Level, stage []audit.Stage) Checker { + return &fakeChecker{level, stage} +} + +type policyChecker struct { + audit.Policy +} + +func (p *policyChecker) LevelAndStages(attrs authorizer.Attributes) (audit.Level, []audit.Stage) { + for _, rule := range p.Rules { + if ruleMatches(&rule, attrs) { + return rule.Level, rule.OmitStages + } + } + return DefaultAuditLevel, p.OmitStages +} + +// Check whether the rule matches the request attrs. +func ruleMatches(r *audit.PolicyRule, attrs authorizer.Attributes) bool { + user := attrs.GetUser() + if len(r.Users) > 0 { + if user == nil || !hasString(r.Users, user.GetName()) { + return false + } + } + if len(r.UserGroups) > 0 { + if user == nil { + return false + } + matched := false + for _, group := range user.GetGroups() { + if hasString(r.UserGroups, group) { + matched = true + break + } + } + if !matched { + return false + } + } + if len(r.Verbs) > 0 { + if !hasString(r.Verbs, attrs.GetVerb()) { + return false + } + } + + if len(r.Namespaces) > 0 || len(r.Resources) > 0 { + return ruleMatchesResource(r, attrs) + } + + if len(r.NonResourceURLs) > 0 { + return ruleMatchesNonResource(r, attrs) + } + + return true +} + +// Check whether the rule's non-resource URLs match the request attrs. 
+func ruleMatchesNonResource(r *audit.PolicyRule, attrs authorizer.Attributes) bool { + if attrs.IsResourceRequest() { + return false + } + + path := attrs.GetPath() + for _, spec := range r.NonResourceURLs { + if pathMatches(path, spec) { + return true + } + } + + return false +} + +// Check whether the path matches the path specification. +func pathMatches(path, spec string) bool { + // Allow wildcard match + if spec == "*" { + return true + } + // Allow exact match + if spec == path { + return true + } + // Allow a trailing * subpath match + if strings.HasSuffix(spec, "*") && strings.HasPrefix(path, strings.TrimRight(spec, "*")) { + return true + } + return false +} + +// Check whether the rule's resource fields match the request attrs. +func ruleMatchesResource(r *audit.PolicyRule, attrs authorizer.Attributes) bool { + if !attrs.IsResourceRequest() { + return false + } + + if len(r.Namespaces) > 0 { + if !hasString(r.Namespaces, attrs.GetNamespace()) { // Non-namespaced resources use the empty string. + return false + } + } + if len(r.Resources) == 0 { + return true + } + + apiGroup := attrs.GetAPIGroup() + resource := attrs.GetResource() + subresource := attrs.GetSubresource() + combinedResource := resource + // If subresource, the resource in the policy must match "(resource)/(subresource)" + if subresource != "" { + combinedResource = resource + "/" + subresource + } + + name := attrs.GetName() + + for _, gr := range r.Resources { + if gr.Group == apiGroup { + if len(gr.Resources) == 0 { + return true + } + for _, res := range gr.Resources { + if len(gr.ResourceNames) == 0 || hasString(gr.ResourceNames, name) { + // match "*" + if res == combinedResource || res == "*" { + return true + } + // match "*/subresource" + if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimPrefix(res, "*/") { + return true + } + // match "resource/*" + if strings.HasSuffix(res, "/*") && resource == strings.TrimSuffix(res, "/*") { + return true + } + } + } + } + } + return false +} + +// Utility function to check whether a string slice contains a string. +func hasString(slice []string, value string) bool { + for _, s := range slice { + if s == value { + return true + } + } + return false +} + +type fakeChecker struct { + level audit.Level + stage []audit.Stage +} + +func (f *fakeChecker) LevelAndStages(_ authorizer.Attributes) (audit.Level, []audit.Stage) { + return f.level, f.stage +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go b/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go new file mode 100644 index 0000000000..e2b107b9f1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package policy + +import ( + "fmt" + + "k8s.io/apiserver/pkg/apis/audit" +) + +// EnforcePolicy drops any part of the event that doesn't conform to a policy level +// or omitStages and sets the event level accordingly +func EnforcePolicy(event *audit.Event, level audit.Level, omitStages []audit.Stage) (*audit.Event, error) { + for _, stage := range omitStages { + if event.Stage == stage { + return nil, nil + } + } + return enforceLevel(event, level) +} + +func enforceLevel(event *audit.Event, level audit.Level) (*audit.Event, error) { + switch level { + case audit.LevelMetadata: + event.Level = audit.LevelMetadata + event.ResponseObject = nil + event.RequestObject = nil + case audit.LevelRequest: + event.Level = audit.LevelRequest + event.ResponseObject = nil + case audit.LevelRequestResponse: + event.Level = audit.LevelRequestResponse + case audit.LevelNone: + return nil, nil + default: + return nil, fmt.Errorf("level unknown: %s", level) + } + return event, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go new file mode 100644 index 0000000000..81b800fd69 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go @@ -0,0 +1,90 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "fmt" + "io/ioutil" + + "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" + auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" + "k8s.io/apiserver/pkg/apis/audit/validation" + "k8s.io/apiserver/pkg/audit" + + "k8s.io/klog/v2" +) + +var ( + apiGroupVersions = []schema.GroupVersion{ + auditv1beta1.SchemeGroupVersion, + auditv1alpha1.SchemeGroupVersion, + auditv1.SchemeGroupVersion, + } + apiGroupVersionSet = map[schema.GroupVersion]bool{} +) + +func init() { + for _, gv := range apiGroupVersions { + apiGroupVersionSet[gv] = true + } +} + +func LoadPolicyFromFile(filePath string) (*auditinternal.Policy, error) { + if filePath == "" { + return nil, fmt.Errorf("file path not specified") + } + policyDef, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read file path %q: %+v", filePath, err) + } + + ret, err := LoadPolicyFromBytes(policyDef) + if err != nil { + return nil, fmt.Errorf("%v: from file %v", err.Error(), filePath) + } + + return ret, nil +} + +func LoadPolicyFromBytes(policyDef []byte) (*auditinternal.Policy, error) { + policy := &auditinternal.Policy{} + decoder := audit.Codecs.UniversalDecoder(apiGroupVersions...) + + _, gvk, err := decoder.Decode(policyDef, nil, policy) + if err != nil { + return nil, fmt.Errorf("failed decoding: %v", err) + } + + // Ensure the policy file contained an apiVersion and kind. 
+ if !apiGroupVersionSet[schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}] { + return nil, fmt.Errorf("unknown group version field %v in policy", gvk) + } + + if err := validation.ValidatePolicy(policy); err != nil { + return nil, err.ToAggregate() + } + + policyCnt := len(policy.Rules) + if policyCnt == 0 { + return nil, fmt.Errorf("loaded illegal policy with 0 rules") + } + klog.V(4).Infof("Loaded %d audit policy rules", policyCnt) + return policy, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/util.go b/vendor/k8s.io/apiserver/pkg/audit/policy/util.go new file mode 100644 index 0000000000..29be912303 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/apis/audit" +) + +// AllStages returns all possible stages +func AllStages() sets.String { + return sets.NewString( + audit.StageRequestReceived, + audit.StageResponseStarted, + audit.StageResponseComplete, + audit.StagePanic, + ) +} + +// AllLevels returns all possible levels +func AllLevels() sets.String { + return sets.NewString( + string(audit.LevelNone), + string(audit.LevelMetadata), + string(audit.LevelRequest), + string(audit.LevelRequestResponse), + ) +} + +// InvertStages subtracts the given array of stages from all stages +func InvertStages(stages []audit.Stage) []audit.Stage { + s := ConvertStagesToStrings(stages) + a := AllStages() + a.Delete(s...) + return ConvertStringSetToStages(a) +} + +// ConvertStagesToStrings converts an array of stages to a string array +func ConvertStagesToStrings(stages []audit.Stage) []string { + s := make([]string, len(stages)) + for i, stage := range stages { + s[i] = string(stage) + } + return s +} + +// ConvertStringSetToStages converts a string set to an array of stages +func ConvertStringSetToStages(set sets.String) []audit.Stage { + stages := make([]audit.Stage, len(set)) + for i, stage := range set.List() { + stages[i] = audit.Stage(stage) + } + return stages +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go new file mode 100644 index 0000000000..205bf25c8b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -0,0 +1,246 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package audit + +import ( + "bytes" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/google/uuid" + "k8s.io/klog/v2" + + authnv1 "k8s.io/api/authentication/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilnet "k8s.io/apimachinery/pkg/util/net" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +const ( + maxUserAgentLength = 1024 + userAgentTruncateSuffix = "...TRUNCATED" +) + +func NewEventFromRequest(req *http.Request, requestReceivedTimestamp time.Time, level auditinternal.Level, attribs authorizer.Attributes) (*auditinternal.Event, error) { + ev := &auditinternal.Event{ + RequestReceivedTimestamp: metav1.NewMicroTime(requestReceivedTimestamp), + Verb: attribs.GetVerb(), + RequestURI: req.URL.RequestURI(), + UserAgent: maybeTruncateUserAgent(req), + Level: level, + } + + // prefer the id from the headers. If not available, create a new one. + // TODO(audit): do we want to forbid the header for non-front-proxy users? + ids := req.Header.Get(auditinternal.HeaderAuditID) + if ids != "" { + ev.AuditID = types.UID(ids) + } else { + ev.AuditID = types.UID(uuid.New().String()) + } + + ips := utilnet.SourceIPs(req) + ev.SourceIPs = make([]string, len(ips)) + for i := range ips { + ev.SourceIPs[i] = ips[i].String() + } + + if user := attribs.GetUser(); user != nil { + ev.User.Username = user.GetName() + ev.User.Extra = map[string]authnv1.ExtraValue{} + for k, v := range user.GetExtra() { + ev.User.Extra[k] = authnv1.ExtraValue(v) + } + ev.User.Groups = user.GetGroups() + ev.User.UID = user.GetUID() + } + + if attribs.IsResourceRequest() { + ev.ObjectRef = &auditinternal.ObjectReference{ + Namespace: attribs.GetNamespace(), + Name: attribs.GetName(), + Resource: attribs.GetResource(), + Subresource: attribs.GetSubresource(), + APIGroup: attribs.GetAPIGroup(), + APIVersion: attribs.GetAPIVersion(), + } + } + + for _, kv := range auditAnnotationsFrom(req.Context()) { + LogAnnotation(ev, kv.key, kv.value) + } + + return ev, nil +} + +// LogImpersonatedUser fills in the impersonated user attributes into an audit event. +func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) { + if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + return + } + ae.ImpersonatedUser = &authnv1.UserInfo{ + Username: user.GetName(), + } + ae.ImpersonatedUser.Groups = user.GetGroups() + ae.ImpersonatedUser.UID = user.GetUID() + ae.ImpersonatedUser.Extra = map[string]authnv1.ExtraValue{} + for k, v := range user.GetExtra() { + ae.ImpersonatedUser.Extra[k] = authnv1.ExtraValue(v) + } +} + +// LogRequestObject fills in the request object into an audit event. The passed runtime.Object +// will be converted to the given gv. 
+func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) { + if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + return + } + + // complete ObjectRef + if ae.ObjectRef == nil { + ae.ObjectRef = &auditinternal.ObjectReference{} + } + + // meta.Accessor is more general than ObjectMetaAccessor, but if it fails, we can just skip setting these bits + if meta, err := meta.Accessor(obj); err == nil { + if len(ae.ObjectRef.Namespace) == 0 { + ae.ObjectRef.Namespace = meta.GetNamespace() + } + if len(ae.ObjectRef.Name) == 0 { + ae.ObjectRef.Name = meta.GetName() + } + if len(ae.ObjectRef.UID) == 0 { + ae.ObjectRef.UID = meta.GetUID() + } + if len(ae.ObjectRef.ResourceVersion) == 0 { + ae.ObjectRef.ResourceVersion = meta.GetResourceVersion() + } + } + if len(ae.ObjectRef.APIVersion) == 0 { + ae.ObjectRef.APIGroup = gvr.Group + ae.ObjectRef.APIVersion = gvr.Version + } + if len(ae.ObjectRef.Resource) == 0 { + ae.ObjectRef.Resource = gvr.Resource + } + if len(ae.ObjectRef.Subresource) == 0 { + ae.ObjectRef.Subresource = subresource + } + + if ae.Level.Less(auditinternal.LevelRequest) { + return + } + + // TODO(audit): hook into the serializer to avoid double conversion + var err error + ae.RequestObject, err = encodeObject(obj, gvr.GroupVersion(), s) + if err != nil { + // TODO(audit): add error slice to audit event struct + klog.Warningf("Auditing failed of %v request: %v", reflect.TypeOf(obj).Name(), err) + return + } +} + +// LogRequestPatch fills in the given patch as the request object into an audit event. +func LogRequestPatch(ae *auditinternal.Event, patch []byte) { + if ae == nil || ae.Level.Less(auditinternal.LevelRequest) { + return + } + + ae.RequestObject = &runtime.Unknown{ + Raw: patch, + ContentType: runtime.ContentTypeJSON, + } +} + +// LogResponseObject fills in the response object into an audit event. The passed runtime.Object +// will be converted to the given gv. +func LogResponseObject(ae *auditinternal.Event, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) { + if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + return + } + if status, ok := obj.(*metav1.Status); ok { + // selectively copy the bounded fields. + ae.ResponseStatus = &metav1.Status{ + Status: status.Status, + Reason: status.Reason, + Code: status.Code, + } + } + + if ae.Level.Less(auditinternal.LevelRequestResponse) { + return + } + // TODO(audit): hook into the serializer to avoid double conversion + var err error + ae.ResponseObject, err = encodeObject(obj, gv, s) + if err != nil { + klog.Warningf("Audit failed for %q response: %v", reflect.TypeOf(obj).Name(), err) + } +} + +func encodeObject(obj runtime.Object, gv schema.GroupVersion, serializer runtime.NegotiatedSerializer) (*runtime.Unknown, error) { + const mediaType = runtime.ContentTypeJSON + info, ok := runtime.SerializerInfoForMediaType(serializer.SupportedMediaTypes(), mediaType) + if !ok { + return nil, fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType) + } + + enc := serializer.EncoderForVersion(info.Serializer, gv) + var buf bytes.Buffer + if err := enc.Encode(obj, &buf); err != nil { + return nil, fmt.Errorf("encoding failed: %v", err) + } + + return &runtime.Unknown{ + Raw: buf.Bytes(), + ContentType: runtime.ContentTypeJSON, + }, nil +} + +// LogAnnotation fills in the Annotations according to the key value pair. 
+func LogAnnotation(ae *auditinternal.Event, key, value string) { + if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + return + } + if ae.Annotations == nil { + ae.Annotations = make(map[string]string) + } + if v, ok := ae.Annotations[key]; ok && v != value { + klog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key]) + return + } + ae.Annotations[key] = value +} + +// truncate User-Agent if too long, otherwise return it directly. +func maybeTruncateUserAgent(req *http.Request) string { + ua := req.UserAgent() + if len(ua) > maxUserAgentLength { + ua = ua[:maxUserAgentLength] + userAgentTruncateSuffix + } + + return ua +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/scheme.go b/vendor/k8s.io/apiserver/pkg/audit/scheme.go new file mode 100644 index 0000000000..031759ec75 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/scheme.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: Delete this file if we generate a clientset. +package audit + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/v1" + "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + "k8s.io/apiserver/pkg/apis/audit/v1beta1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) + +func init() { + metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(v1.AddToScheme(Scheme)) + utilruntime.Must(v1alpha1.AddToScheme(Scheme)) + utilruntime.Must(v1beta1.AddToScheme(Scheme)) + utilruntime.Must(auditinternal.AddToScheme(Scheme)) + utilruntime.Must(Scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/types.go b/vendor/k8s.io/apiserver/pkg/audit/types.go new file mode 100644 index 0000000000..b78bd086b0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/types.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + auditinternal "k8s.io/apiserver/pkg/apis/audit" +) + +type Sink interface { + // ProcessEvents handles events. 
Per audit ID it might be that ProcessEvents is called up to three times. + // Errors might be logged by the sink itself. If an error should be fatal, leading to an internal + // error, ProcessEvents is supposed to panic. The event must not be mutated and is reused by the caller + // after the call returns, i.e. the sink has to make a deepcopy to keep a copy around if necessary. + // Returns true on success, may return false on error. + ProcessEvents(events ...*auditinternal.Event) bool +} + +type Backend interface { + Sink + + // Run will initialize the backend. It must not block, but may run go routines in the background. If + // stopCh is closed, it is supposed to stop them. Run will be called before the first call to ProcessEvents. + Run(stopCh <-chan struct{}) error + + // Shutdown will synchronously shut down the backend while making sure that all pending + // events are delivered. It can be assumed that this method is called after + // the stopCh channel passed to the Run method has been closed. + Shutdown() + + // Returns the backend PluginName. + String() string +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/union.go b/vendor/k8s.io/apiserver/pkg/audit/union.go new file mode 100644 index 0000000000..39dd74f740 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/union.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/errors" + auditinternal "k8s.io/apiserver/pkg/apis/audit" +) + +// Union returns an audit Backend which logs events to a set of backends. The returned +// Sink implementation blocks in turn for each call to ProcessEvents. +func Union(backends ...Backend) Backend { + if len(backends) == 1 { + return backends[0] + } + return union{backends} +} + +type union struct { + backends []Backend +} + +func (u union) ProcessEvents(events ...*auditinternal.Event) bool { + success := true + for _, backend := range u.backends { + success = backend.ProcessEvents(events...) && success + } + return success +} + +func (u union) Run(stopCh <-chan struct{}) error { + var funcs []func() error + for _, backend := range u.backends { + funcs = append(funcs, func() error { + return backend.Run(stopCh) + }) + } + return errors.AggregateGoroutines(funcs...) +} + +func (u union) Shutdown() { + for _, backend := range u.backends { + backend.Shutdown() + } +} + +func (u union) String() string { + var backendStrings []string + for _, backend := range u.backends { + backendStrings = append(backendStrings, fmt.Sprintf("%s", backend)) + } + return fmt.Sprintf("union[%s]", strings.Join(backendStrings, ",")) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go new file mode 100644 index 0000000000..bcf7eb4bc9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import ( + "context" + "fmt" + "net/http" +) + +func authenticate(ctx context.Context, implicitAuds Audiences, authenticate func() (*Response, bool, error)) (*Response, bool, error) { + targetAuds, ok := AudiencesFrom(ctx) + // We can remove this once api audiences is never empty. That will probably + // be N releases after TokenRequest is GA. + if !ok { + return authenticate() + } + auds := implicitAuds.Intersect(targetAuds) + if len(auds) == 0 { + return nil, false, nil + } + resp, ok, err := authenticate() + if err != nil || !ok { + return nil, false, err + } + if len(resp.Audiences) > 0 { + // maybe the authenticator was audience aware after all. + return nil, false, fmt.Errorf("audience agnostic authenticator wrapped an authenticator that returned audiences: %q", resp.Audiences) + } + resp.Audiences = auds + return resp, true, nil +} + +type audAgnosticRequestAuthenticator struct { + implicit Audiences + delegate Request +} + +var _ = Request(&audAgnosticRequestAuthenticator{}) + +func (a *audAgnosticRequestAuthenticator) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return authenticate(req.Context(), a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateRequest(req) + }) +} + +// WrapAudienceAgnosticRequest wraps an audience agnostic request authenticator +// to restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticRequest(implicit Audiences, delegate Request) Request { + return &audAgnosticRequestAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} + +type audAgnosticTokenAuthenticator struct { + implicit Audiences + delegate Token +} + +var _ = Token(&audAgnosticTokenAuthenticator{}) + +func (a *audAgnosticTokenAuthenticator) AuthenticateToken(ctx context.Context, tok string) (*Response, bool, error) { + return authenticate(ctx, a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateToken(ctx, tok) + }) +} + +// WrapAudienceAgnosticToken wraps an audience agnostic token authenticator to +// restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticToken(implicit Audiences, delegate Token) Token { + return &audAgnosticTokenAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go new file mode 100644 index 0000000000..2a3a918896 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import "context" + +// Audiences is a container for the Audiences of a token. +type Audiences []string + +// The key type is unexported to prevent collisions +type key int + +const ( + // audiencesKey is the context key for request audiences. + audiencesKey key = iota +) + +// WithAudiences returns a context that stores a request's expected audiences. +func WithAudiences(ctx context.Context, auds Audiences) context.Context { + return context.WithValue(ctx, audiencesKey, auds) +} + +// AudiencesFrom returns a request's expected audiences stored in the request context. +func AudiencesFrom(ctx context.Context) (Audiences, bool) { + auds, ok := ctx.Value(audiencesKey).(Audiences) + return auds, ok +} + +// Has checks if Audiences contains a specific audiences. +func (a Audiences) Has(taud string) bool { + for _, aud := range a { + if aud == taud { + return true + } + } + return false +} + +// Intersect intersects Audiences with a target Audiences and returns all +// elements in both. +func (a Audiences) Intersect(tauds Audiences) Audiences { + selected := Audiences{} + for _, taud := range tauds { + if a.Has(taud) { + selected = append(selected, taud) + } + } + return selected +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go new file mode 100644 index 0000000000..8ff979b807 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go @@ -0,0 +1,65 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import ( + "context" + "net/http" + + "k8s.io/apiserver/pkg/authentication/user" +) + +// Token checks a string value against a backing authentication store and +// returns a Response or an error if the token could not be checked. +type Token interface { + AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) +} + +// Request attempts to extract authentication information from a request and +// returns a Response or an error if the request could not be checked. +type Request interface { + AuthenticateRequest(req *http.Request) (*Response, bool, error) +} + +// TokenFunc is a function that implements the Token interface. +type TokenFunc func(ctx context.Context, token string) (*Response, bool, error) + +// AuthenticateToken implements authenticator.Token. +func (f TokenFunc) AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) { + return f(ctx, token) +} + +// RequestFunc is a function that implements the Request interface. 
+type RequestFunc func(req *http.Request) (*Response, bool, error) + +// AuthenticateRequest implements authenticator.Request. +func (f RequestFunc) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return f(req) +} + +// Response is the struct returned by authenticator interfaces upon successful +// authentication. It contains information about whether the authenticator +// authenticated the request, information about the context of the +// authentication, and information about the authenticated user. +type Response struct { + // Audiences is the set of audiences the authenticator was able to validate + // the token against. If the authenticator is not audience aware, this field + // will be empty. + Audiences Audiences + // User is the UserInfo associated with the authentication context. + User user.Info +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go new file mode 100644 index 0000000000..83697bb547 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -0,0 +1,120 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticatorfactory + +import ( + "errors" + "time" + + "github.com/go-openapi/spec" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/group" + "k8s.io/apiserver/pkg/authentication/request/anonymous" + "k8s.io/apiserver/pkg/authentication/request/bearertoken" + "k8s.io/apiserver/pkg/authentication/request/headerrequest" + unionauth "k8s.io/apiserver/pkg/authentication/request/union" + "k8s.io/apiserver/pkg/authentication/request/websocket" + "k8s.io/apiserver/pkg/authentication/request/x509" + "k8s.io/apiserver/pkg/authentication/token/cache" + webhooktoken "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook" + authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1" +) + +// DelegatingAuthenticatorConfig is the minimal configuration needed to create an authenticator +// built to delegate authentication to a kube API server +type DelegatingAuthenticatorConfig struct { + Anonymous bool + + // TokenAccessReviewClient is a client to do token review. It can be nil. Then every token is ignored. + TokenAccessReviewClient authenticationclient.TokenReviewInterface + + // WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff + + // CacheTTL is the length of time that a token authentication answer will be cached. + CacheTTL time.Duration + + // CAContentProvider are the options for verifying incoming connections using mTLS and directly assigning to users. 
+ // Generally this is the CA bundle file used to authenticate client certificates + // If this is nil, then mTLS will not be used. + ClientCertificateCAContentProvider CAContentProvider + + APIAudiences authenticator.Audiences + + RequestHeaderConfig *RequestHeaderConfig +} + +func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.SecurityDefinitions, error) { + authenticators := []authenticator.Request{} + securityDefinitions := spec.SecurityDefinitions{} + + // front-proxy first, then remote + // Add the front proxy authenticator if requested + if c.RequestHeaderConfig != nil { + requestHeaderAuthenticator := headerrequest.NewDynamicVerifyOptionsSecure( + c.RequestHeaderConfig.CAContentProvider.VerifyOptions, + c.RequestHeaderConfig.AllowedClientNames, + c.RequestHeaderConfig.UsernameHeaders, + c.RequestHeaderConfig.GroupHeaders, + c.RequestHeaderConfig.ExtraHeaderPrefixes, + ) + authenticators = append(authenticators, requestHeaderAuthenticator) + } + + // x509 client cert auth + if c.ClientCertificateCAContentProvider != nil { + authenticators = append(authenticators, x509.NewDynamic(c.ClientCertificateCAContentProvider.VerifyOptions, x509.CommonNameUserConversion)) + } + + if c.TokenAccessReviewClient != nil { + if c.WebhookRetryBackoff == nil { + return nil, nil, errors.New("retry backoff parameters for delegating authentication webhook has not been specified") + } + tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences, *c.WebhookRetryBackoff) + if err != nil { + return nil, nil, err + } + cachingTokenAuth := cache.New(tokenAuth, false, c.CacheTTL, c.CacheTTL) + authenticators = append(authenticators, bearertoken.New(cachingTokenAuth), websocket.NewProtocolAuthenticator(cachingTokenAuth)) + + securityDefinitions["BearerToken"] = &spec.SecurityScheme{ + SecuritySchemeProps: spec.SecuritySchemeProps{ + Type: "apiKey", + Name: "authorization", + In: "header", + Description: "Bearer Token authentication", + }, + } + } + + if len(authenticators) == 0 { + if c.Anonymous { + return anonymous.NewAuthenticator(), &securityDefinitions, nil + } + return nil, nil, errors.New("No authentication method configured") + } + + authenticator := group.NewAuthenticatedGroupAdder(unionauth.New(authenticators...)) + if c.Anonymous { + authenticator = unionauth.NewFailOnError(authenticator, anonymous.NewAuthenticator()) + } + return authenticator, &securityDefinitions, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go new file mode 100644 index 0000000000..f31656529f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
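A hedged sketch of constructing the delegating authenticator defined above, assuming an in-cluster client-go configuration; ClientCertificateCAContentProvider and RequestHeaderConfig are left unset for brevity, and the audience string and backoff values are placeholders rather than recommended settings.

```go
package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Any rest.Config works; in-cluster config is assumed here.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	delegating := authenticatorfactory.DelegatingAuthenticatorConfig{
		Anonymous:               true,
		TokenAccessReviewClient: client.AuthenticationV1().TokenReviews(),
		// WebhookRetryBackoff is required when a token review client is set.
		WebhookRetryBackoff: &wait.Backoff{Duration: 500 * time.Millisecond, Factor: 1.5, Jitter: 0.2, Steps: 5},
		CacheTTL:             10 * time.Second,
		APIAudiences:         authenticator.Audiences{"https://kubernetes.default.svc"},
	}

	requestAuthenticator, _, err := delegating.New()
	if err != nil {
		panic(err)
	}
	_ = requestAuthenticator // plug into the server's handler chain
}
```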
+*/ + +package authenticatorfactory + +import ( + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/request/bearertoken" + "k8s.io/apiserver/pkg/authentication/token/tokenfile" + "k8s.io/apiserver/pkg/authentication/user" +) + +// NewFromTokens returns an authenticator.Request or an error +func NewFromTokens(tokens map[string]*user.DefaultInfo) authenticator.Request { + return bearertoken.New(tokenfile.New(tokens)) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go new file mode 100644 index 0000000000..b5aa1c524a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticatorfactory + +import ( + "crypto/x509" + + "k8s.io/apiserver/pkg/authentication/request/headerrequest" +) + +type RequestHeaderConfig struct { + // UsernameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins. + UsernameHeaders headerrequest.StringSliceProvider + // GroupHeaders are the headers to check (case-insensitively) for a group names. All values will be used. + GroupHeaders headerrequest.StringSliceProvider + // ExtraHeaderPrefixes are the head prefixes to check (case-insentively) for filling in + // the user.Info.Extra. All values of all matching headers will be added. + ExtraHeaderPrefixes headerrequest.StringSliceProvider + // CAContentProvider the options for verifying incoming connections using mTLS. Generally this points to CA bundle file which is used verify the identity of the front proxy. + // It may produce different options at will. + CAContentProvider CAContentProvider + // AllowedClientNames is a list of common names that may be presented by the authenticating front proxy. Empty means: accept any. + AllowedClientNames headerrequest.StringSliceProvider +} + +// CAContentProvider provides ca bundle byte content +type CAContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCABundleContent provides ca bundle byte content + CurrentCABundleContent() []byte + // VerifyOptions provides VerifyOptions for authenticators + VerifyOptions() (x509.VerifyOptions, bool) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go new file mode 100644 index 0000000000..5ac6b2ddf9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package group + +import ( + "net/http" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" +) + +// AuthenticatedGroupAdder adds system:authenticated group when appropriate +type AuthenticatedGroupAdder struct { + // Authenticator is delegated to make the authentication decision + Authenticator authenticator.Request +} + +// NewAuthenticatedGroupAdder wraps a request authenticator, and adds the system:authenticated group when appropriate. +// Authentication must succeed, the user must not be system:anonymous, the groups system:authenticated or system:unauthenticated must +// not be present +func NewAuthenticatedGroupAdder(auth authenticator.Request) authenticator.Request { + return &AuthenticatedGroupAdder{auth} +} + +func (g *AuthenticatedGroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateRequest(req) + if err != nil || !ok { + return nil, ok, err + } + + if r.User.GetName() == user.Anonymous { + return r, true, nil + } + for _, group := range r.User.GetGroups() { + if group == user.AllAuthenticated || group == user.AllUnauthenticated { + return r, true, nil + } + } + + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), user.AllAuthenticated), + Extra: r.User.GetExtra(), + } + return r, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go new file mode 100644 index 0000000000..3079dad625 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package group + +import ( + "net/http" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" +) + +// GroupAdder adds groups to an authenticated user.Info +type GroupAdder struct { + // Authenticator is delegated to make the authentication decision + Authenticator authenticator.Request + // Groups are additional groups to add to the user.Info from a successful authentication + Groups []string +} + +// NewGroupAdder wraps a request authenticator, and adds the specified groups to the returned user when authentication succeeds +func NewGroupAdder(auth authenticator.Request, groups []string) authenticator.Request { + return &GroupAdder{auth, groups} +} + +func (g *GroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateRequest(req) + if err != nil || !ok { + return nil, ok, err + } + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), g.Groups...), + Extra: r.User.GetExtra(), + } + return r, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go new file mode 100644 index 0000000000..0ed5ee5596 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package group + +import ( + "context" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" +) + +// TokenGroupAdder adds groups to an authenticated user.Info +type TokenGroupAdder struct { + // Authenticator is delegated to make the authentication decision + Authenticator authenticator.Token + // Groups are additional groups to add to the user.Info from a successful authentication + Groups []string +} + +// NewTokenGroupAdder wraps a token authenticator, and adds the specified groups to the returned user when authentication succeeds +func NewTokenGroupAdder(auth authenticator.Token, groups []string) authenticator.Token { + return &TokenGroupAdder{auth, groups} +} + +func (g *TokenGroupAdder) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateToken(ctx, token) + if err != nil || !ok { + return nil, ok, err + } + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), g.Groups...), + Extra: r.User.GetExtra(), + } + return r, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go new file mode 100644 index 0000000000..f9177d1513 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors. 
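For illustration, a small sketch of the group adders defined above, wrapping a toy RequestFunc delegate; the user name and the extra group are placeholders.

```go
package main

import (
	"fmt"
	"net/http"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/group"
	"k8s.io/apiserver/pkg/authentication/user"
)

func main() {
	// Toy delegate that authenticates every request as "bob".
	base := authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
		return &authenticator.Response{User: &user.DefaultInfo{Name: "bob"}}, true, nil
	})

	// Stamp an extra group onto every successful authentication.
	auth := group.NewGroupAdder(base, []string{"example-readers"})

	req, _ := http.NewRequest("GET", "https://example.invalid/healthz", nil)
	resp, ok, err := auth.AuthenticateRequest(req)
	fmt.Println(ok, err, resp.User.GetGroups()) // true <nil> [example-readers]
}
```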
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package anonymous + +import ( + "net/http" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" +) + +const ( + anonymousUser = user.Anonymous + + unauthenticatedGroup = user.AllUnauthenticated +) + +func NewAuthenticator() authenticator.Request { + return authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) { + auds, _ := authenticator.AudiencesFrom(req.Context()) + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: anonymousUser, + Groups: []string{unauthenticatedGroup}, + }, + Audiences: auds, + }, true, nil + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go new file mode 100644 index 0000000000..292b4f57d0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go @@ -0,0 +1,67 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bearertoken + +import ( + "errors" + "net/http" + "strings" + + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +type Authenticator struct { + auth authenticator.Token +} + +func New(auth authenticator.Token) *Authenticator { + return &Authenticator{auth} +} + +var invalidToken = errors.New("invalid bearer token") + +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + auth := strings.TrimSpace(req.Header.Get("Authorization")) + if auth == "" { + return nil, false, nil + } + parts := strings.SplitN(auth, " ", 3) + if len(parts) < 2 || strings.ToLower(parts[0]) != "bearer" { + return nil, false, nil + } + + token := parts[1] + + // Empty bearer tokens aren't valid + if len(token) == 0 { + return nil, false, nil + } + + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) + // if we authenticated successfully, go ahead and remove the bearer token so that no one + // is ever tempted to use it inside of the API server + if ok { + req.Header.Del("Authorization") + } + + // If the token authenticator didn't error, provide a default error + if !ok && err == nil { + err = invalidToken + } + + return resp, ok, err +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go new file mode 100644 index 0000000000..abf509a97d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go @@ -0,0 +1,239 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package headerrequest + +import ( + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "k8s.io/apiserver/pkg/authentication/authenticator" + x509request "k8s.io/apiserver/pkg/authentication/request/x509" + "k8s.io/apiserver/pkg/authentication/user" + utilcert "k8s.io/client-go/util/cert" +) + +// StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places. +type StringSliceProvider interface { + // Value returns the current string slice. Callers should never mutate the returned value. + Value() []string +} + +// StringSliceProviderFunc is a function that matches the StringSliceProvider interface +type StringSliceProviderFunc func() []string + +// Value returns the current string slice. Callers should never mutate the returned value. +func (d StringSliceProviderFunc) Value() []string { + return d() +} + +// StaticStringSlice a StringSliceProvider that returns a fixed value +type StaticStringSlice []string + +// Value returns the current string slice. Callers should never mutate the returned value. +func (s StaticStringSlice) Value() []string { + return s +} + +type requestHeaderAuthRequestHandler struct { + // nameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins. 
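A minimal sketch of the bearer-token request authenticator above, backed by a toy TokenFunc; the token value is a placeholder. Note that, per the code, the Authorization header is stripped once the token authenticates successfully.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
	"k8s.io/apiserver/pkg/authentication/user"
)

func main() {
	tokens := map[string]string{"s3cr3t": "alice"}

	tokenAuth := authenticator.TokenFunc(func(ctx context.Context, token string) (*authenticator.Response, bool, error) {
		name, ok := tokens[token]
		if !ok {
			return nil, false, nil
		}
		return &authenticator.Response{User: &user.DefaultInfo{Name: name}}, true, nil
	})

	auth := bearertoken.New(tokenAuth)

	req, _ := http.NewRequest("GET", "https://example.invalid/api", nil)
	req.Header.Set("Authorization", "Bearer s3cr3t")

	resp, ok, err := auth.AuthenticateRequest(req)
	fmt.Println(ok, err, resp.User.GetName()) // true <nil> alice

	// On success the Authorization header has been removed from the request.
	fmt.Printf("%q\n", req.Header.Get("Authorization")) // ""
}
```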
+ nameHeaders StringSliceProvider + + // groupHeaders are the headers to check (case-insensitively) for group membership. All values of all headers will be added. + groupHeaders StringSliceProvider + + // extraHeaderPrefixes are the head prefixes to check (case-insensitively) for filling in + // the user.Info.Extra. All values of all matching headers will be added. + extraHeaderPrefixes StringSliceProvider +} + +func New(nameHeaders, groupHeaders, extraHeaderPrefixes []string) (authenticator.Request, error) { + trimmedNameHeaders, err := trimHeaders(nameHeaders...) + if err != nil { + return nil, err + } + trimmedGroupHeaders, err := trimHeaders(groupHeaders...) + if err != nil { + return nil, err + } + trimmedExtraHeaderPrefixes, err := trimHeaders(extraHeaderPrefixes...) + if err != nil { + return nil, err + } + + return NewDynamic( + StaticStringSlice(trimmedNameHeaders), + StaticStringSlice(trimmedGroupHeaders), + StaticStringSlice(trimmedExtraHeaderPrefixes), + ), nil +} + +func NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { + return &requestHeaderAuthRequestHandler{ + nameHeaders: nameHeaders, + groupHeaders: groupHeaders, + extraHeaderPrefixes: extraHeaderPrefixes, + } +} + +func trimHeaders(headerNames ...string) ([]string, error) { + ret := []string{} + for _, headerName := range headerNames { + trimmedHeader := strings.TrimSpace(headerName) + if len(trimmedHeader) == 0 { + return nil, fmt.Errorf("empty header %q", headerName) + } + ret = append(ret, trimmedHeader) + } + + return ret, nil +} + +func NewSecure(clientCA string, proxyClientNames []string, nameHeaders []string, groupHeaders []string, extraHeaderPrefixes []string) (authenticator.Request, error) { + if len(clientCA) == 0 { + return nil, fmt.Errorf("missing clientCA file") + } + + // Wrap with an x509 verifier + caData, err := ioutil.ReadFile(clientCA) + if err != nil { + return nil, fmt.Errorf("error reading %s: %v", clientCA, err) + } + opts := x509request.DefaultVerifyOptions() + opts.Roots = x509.NewCertPool() + certs, err := utilcert.ParseCertsPEM(caData) + if err != nil { + return nil, fmt.Errorf("error loading certs from %s: %v", clientCA, err) + } + for _, cert := range certs { + opts.Roots.AddCert(cert) + } + + trimmedNameHeaders, err := trimHeaders(nameHeaders...) + if err != nil { + return nil, err + } + trimmedGroupHeaders, err := trimHeaders(groupHeaders...) + if err != nil { + return nil, err + } + trimmedExtraHeaderPrefixes, err := trimHeaders(extraHeaderPrefixes...) 
+ if err != nil { + return nil, err + } + + return NewDynamicVerifyOptionsSecure( + x509request.StaticVerifierFn(opts), + StaticStringSlice(proxyClientNames), + StaticStringSlice(trimmedNameHeaders), + StaticStringSlice(trimmedGroupHeaders), + StaticStringSlice(trimmedExtraHeaderPrefixes), + ), nil +} + +func NewDynamicVerifyOptionsSecure(verifyOptionFn x509request.VerifyOptionFunc, proxyClientNames, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { + headerAuthenticator := NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes) + + return x509request.NewDynamicCAVerifier(verifyOptionFn, headerAuthenticator, proxyClientNames) +} + +func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + name := headerValue(req.Header, a.nameHeaders.Value()) + if len(name) == 0 { + return nil, false, nil + } + groups := allHeaderValues(req.Header, a.groupHeaders.Value()) + extra := newExtra(req.Header, a.extraHeaderPrefixes.Value()) + + // clear headers used for authentication + for _, headerName := range a.nameHeaders.Value() { + req.Header.Del(headerName) + } + for _, headerName := range a.groupHeaders.Value() { + req.Header.Del(headerName) + } + for k := range extra { + for _, prefix := range a.extraHeaderPrefixes.Value() { + req.Header.Del(prefix + k) + } + } + + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: name, + Groups: groups, + Extra: extra, + }, + }, true, nil +} + +func headerValue(h http.Header, headerNames []string) string { + for _, headerName := range headerNames { + headerValue := h.Get(headerName) + if len(headerValue) > 0 { + return headerValue + } + } + return "" +} + +func allHeaderValues(h http.Header, headerNames []string) []string { + ret := []string{} + for _, headerName := range headerNames { + headerKey := http.CanonicalHeaderKey(headerName) + values, ok := h[headerKey] + if !ok { + continue + } + + for _, headerValue := range values { + if len(headerValue) > 0 { + ret = append(ret, headerValue) + } + } + } + return ret +} + +func unescapeExtraKey(encodedKey string) string { + key, err := url.PathUnescape(encodedKey) // Decode %-encoded bytes. + if err != nil { + return encodedKey // Always record extra strings, even if malformed/unencoded. + } + return key +} + +func newExtra(h http.Header, headerPrefixes []string) map[string][]string { + ret := map[string][]string{} + + // we have to iterate over prefixes first in order to have proper ordering inside the value slices + for _, prefix := range headerPrefixes { + for headerName, vv := range h { + if !strings.HasPrefix(strings.ToLower(headerName), strings.ToLower(prefix)) { + continue + } + + extraKey := unescapeExtraKey(strings.ToLower(headerName[len(prefix):])) + ret[extraKey] = append(ret[extraKey], vv...) + } + } + + return ret +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go new file mode 100644 index 0000000000..561b6fba9b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go @@ -0,0 +1,337 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
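A sketch of the plain header authenticator built by New above. The X-Remote-* names are the conventional front-proxy defaults and are an assumption here; New alone trusts the headers as-is, so in practice NewSecure or NewDynamicVerifyOptionsSecure (also in this file) is used to first require a verified front-proxy client certificate.

```go
package main

import (
	"fmt"
	"net/http"

	"k8s.io/apiserver/pkg/authentication/request/headerrequest"
)

func main() {
	auth, err := headerrequest.New(
		[]string{"X-Remote-User"},
		[]string{"X-Remote-Group"},
		[]string{"X-Remote-Extra-"},
	)
	if err != nil {
		panic(err)
	}

	req, _ := http.NewRequest("GET", "https://example.invalid/api", nil)
	req.Header.Set("X-Remote-User", "jane")
	req.Header.Add("X-Remote-Group", "developers")
	req.Header.Add("X-Remote-Group", "oncall")
	req.Header.Set("X-Remote-Extra-Scopes", "read")

	resp, ok, err := auth.AuthenticateRequest(req)
	// The authentication headers are removed from the request afterwards;
	// the extra key is lower-cased ("scopes").
	fmt.Println(ok, err, resp.User.GetName(), resp.User.GetGroups(), resp.User.GetExtra())
}
```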
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package headerrequest + +import ( + "context" + "encoding/json" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + "sync/atomic" +) + +const ( + authenticationRoleName = "extension-apiserver-authentication-reader" +) + +// RequestHeaderAuthRequestProvider a provider that knows how to dynamically fill parts of RequestHeaderConfig struct +type RequestHeaderAuthRequestProvider interface { + UsernameHeaders() []string + GroupHeaders() []string + ExtraHeaderPrefixes() []string + AllowedClientNames() []string +} + +var _ RequestHeaderAuthRequestProvider = &RequestHeaderAuthRequestController{} + +type requestHeaderBundle struct { + UsernameHeaders []string + GroupHeaders []string + ExtraHeaderPrefixes []string + AllowedClientNames []string +} + +// RequestHeaderAuthRequestController a controller that exposes a set of methods for dynamically filling parts of RequestHeaderConfig struct. +// The methods are sourced from the config map which is being monitored by this controller. +// The controller is primed from the server at the construction time for components that don't want to dynamically react to changes +// in the config map. 
+type RequestHeaderAuthRequestController struct { + name string + + configmapName string + configmapNamespace string + + client kubernetes.Interface + configmapLister corev1listers.ConfigMapNamespaceLister + configmapInformer cache.SharedIndexInformer + configmapInformerSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + // exportedRequestHeaderBundle is a requestHeaderBundle that contains the last read, non-zero length content of the configmap + exportedRequestHeaderBundle atomic.Value + + usernameHeadersKey string + groupHeadersKey string + extraHeaderPrefixesKey string + allowedClientNamesKey string +} + +// NewRequestHeaderAuthRequestController creates a new controller that implements RequestHeaderAuthRequestController +func NewRequestHeaderAuthRequestController( + cmName string, + cmNamespace string, + client kubernetes.Interface, + usernameHeadersKey, groupHeadersKey, extraHeaderPrefixesKey, allowedClientNamesKey string) *RequestHeaderAuthRequestController { + c := &RequestHeaderAuthRequestController{ + name: "RequestHeaderAuthRequestController", + + client: client, + + configmapName: cmName, + configmapNamespace: cmNamespace, + + usernameHeadersKey: usernameHeadersKey, + groupHeadersKey: groupHeadersKey, + extraHeaderPrefixesKey: extraHeaderPrefixesKey, + allowedClientNamesKey: allowedClientNamesKey, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RequestHeaderAuthRequestController"), + } + + // we construct our own informer because we need such a small subset of the information available. Just one namespace. + c.configmapInformer = coreinformers.NewFilteredConfigMapInformer(client, c.configmapNamespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(listOptions *metav1.ListOptions) { + listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", c.configmapName).String() + }) + + c.configmapInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + if cast, ok := obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + if cast, ok := tombstone.Obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + } + return true // always return true just in case. The checks are fairly cheap + }, + Handler: cache.ResourceEventHandlerFuncs{ + // we have a filter, so any time we're called, we may as well queue. We only ever check one configmap + // so we don't have to be choosy about our key. 
+ AddFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + c.queue.Add(c.keyFn()) + }, + DeleteFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + }, + }) + + c.configmapLister = corev1listers.NewConfigMapLister(c.configmapInformer.GetIndexer()).ConfigMaps(c.configmapNamespace) + c.configmapInformerSynced = c.configmapInformer.HasSynced + + return c +} + +func (c *RequestHeaderAuthRequestController) UsernameHeaders() []string { + return c.loadRequestHeaderFor(c.usernameHeadersKey) +} + +func (c *RequestHeaderAuthRequestController) GroupHeaders() []string { + return c.loadRequestHeaderFor(c.groupHeadersKey) +} + +func (c *RequestHeaderAuthRequestController) ExtraHeaderPrefixes() []string { + return c.loadRequestHeaderFor(c.extraHeaderPrefixesKey) +} + +func (c *RequestHeaderAuthRequestController) AllowedClientNames() []string { + return c.loadRequestHeaderFor(c.allowedClientNamesKey) +} + +// Run starts RequestHeaderAuthRequestController controller and blocks until stopCh is closed. +func (c *RequestHeaderAuthRequestController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + go c.configmapInformer.Run(stopCh) + + // wait for caches to fill before starting your work + if !cache.WaitForNamedCacheSync(c.name, stopCh, c.configmapInformerSynced) { + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +// // RunOnce runs a single sync loop +func (c *RequestHeaderAuthRequestController) RunOnce() error { + configMap, err := c.client.CoreV1().ConfigMaps(c.configmapNamespace).Get(context.TODO(), c.configmapName, metav1.GetOptions{}) + switch { + case errors.IsNotFound(err): + // ignore, authConfigMap is nil now + return nil + case errors.IsForbidden(err): + klog.Warningf("Unable to get configmap/%s in %s. 
Usually fixed by "+ + "'kubectl create rolebinding -n %s ROLEBINDING_NAME --role=%s --serviceaccount=YOUR_NS:YOUR_SA'", + c.configmapName, c.configmapNamespace, c.configmapNamespace, authenticationRoleName) + return err + case err != nil: + return err + } + return c.syncConfigMap(configMap) +} + +func (c *RequestHeaderAuthRequestController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *RequestHeaderAuthRequestController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// sync reads the config and propagates the changes to exportedRequestHeaderBundle +// which is exposed by the set of methods that are used to fill RequestHeaderConfig struct +func (c *RequestHeaderAuthRequestController) sync() error { + configMap, err := c.configmapLister.Get(c.configmapName) + if err != nil { + return err + } + return c.syncConfigMap(configMap) +} + +func (c *RequestHeaderAuthRequestController) syncConfigMap(configMap *corev1.ConfigMap) error { + hasChanged, newRequestHeaderBundle, err := c.hasRequestHeaderBundleChanged(configMap) + if err != nil { + return err + } + if hasChanged { + c.exportedRequestHeaderBundle.Store(newRequestHeaderBundle) + klog.V(2).Infof("Loaded a new request header values for %v", c.name) + } + return nil +} + +func (c *RequestHeaderAuthRequestController) hasRequestHeaderBundleChanged(cm *corev1.ConfigMap) (bool, *requestHeaderBundle, error) { + currentHeadersBundle, err := c.getRequestHeaderBundleFromConfigMap(cm) + if err != nil { + return false, nil, err + } + + rawHeaderBundle := c.exportedRequestHeaderBundle.Load() + if rawHeaderBundle == nil { + return true, currentHeadersBundle, nil + } + + // check to see if we have a change. If the values are the same, do nothing. 
+ loadedHeadersBundle, ok := rawHeaderBundle.(*requestHeaderBundle) + if !ok { + return true, currentHeadersBundle, nil + } + + if !equality.Semantic.DeepEqual(loadedHeadersBundle, currentHeadersBundle) { + return true, currentHeadersBundle, nil + } + return false, nil, nil +} + +func (c *RequestHeaderAuthRequestController) getRequestHeaderBundleFromConfigMap(cm *corev1.ConfigMap) (*requestHeaderBundle, error) { + usernameHeaderCurrentValue, err := deserializeStrings(cm.Data[c.usernameHeadersKey]) + if err != nil { + return nil, err + } + + groupHeadersCurrentValue, err := deserializeStrings(cm.Data[c.groupHeadersKey]) + if err != nil { + return nil, err + } + + extraHeaderPrefixesCurrentValue, err := deserializeStrings(cm.Data[c.extraHeaderPrefixesKey]) + if err != nil { + return nil, err + + } + + allowedClientNamesCurrentValue, err := deserializeStrings(cm.Data[c.allowedClientNamesKey]) + if err != nil { + return nil, err + } + + return &requestHeaderBundle{ + UsernameHeaders: usernameHeaderCurrentValue, + GroupHeaders: groupHeadersCurrentValue, + ExtraHeaderPrefixes: extraHeaderPrefixesCurrentValue, + AllowedClientNames: allowedClientNamesCurrentValue, + }, nil +} + +func (c *RequestHeaderAuthRequestController) loadRequestHeaderFor(key string) []string { + rawHeaderBundle := c.exportedRequestHeaderBundle.Load() + if rawHeaderBundle == nil { + return nil // this can happen if we've been unable load data from the apiserver for some reason + } + headerBundle := rawHeaderBundle.(*requestHeaderBundle) + + switch key { + case c.usernameHeadersKey: + return headerBundle.UsernameHeaders + case c.groupHeadersKey: + return headerBundle.GroupHeaders + case c.extraHeaderPrefixesKey: + return headerBundle.ExtraHeaderPrefixes + case c.allowedClientNamesKey: + return headerBundle.AllowedClientNames + default: + return nil + } +} + +func (c *RequestHeaderAuthRequestController) keyFn() string { + // this format matches DeletionHandlingMetaNamespaceKeyFunc for our single key + return c.configmapNamespace + "/" + c.configmapName +} + +func deserializeStrings(in string) ([]string, error) { + if len(in) == 0 { + return nil, nil + } + var ret []string + if err := json.Unmarshal([]byte(in), &ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go b/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go new file mode 100644 index 0000000000..512063beea --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go @@ -0,0 +1,71 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
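A sketch of priming and running the RequestHeaderAuthRequestController above. The configmap name, namespace, and data keys follow the well-known extension-apiserver-authentication layout, but as far as this file is concerned they are assumptions; any rest.Config works in place of the in-cluster one.

```go
package main

import (
	"k8s.io/apiserver/pkg/authentication/request/headerrequest"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Keys assumed to follow the extension-apiserver-authentication layout.
	controller := headerrequest.NewRequestHeaderAuthRequestController(
		"extension-apiserver-authentication", "kube-system", client,
		"requestheader-username-headers",
		"requestheader-group-headers",
		"requestheader-extra-headers-prefix",
		"requestheader-allowed-names",
	)

	// Prime once synchronously, then keep the values in sync in the background.
	if err := controller.RunOnce(); err != nil {
		panic(err)
	}
	stopCh := make(chan struct{}) // close to stop the controller
	go controller.Run(1, stopCh)

	// The provider methods feed a RequestHeaderConfig / NewDynamic authenticator.
	_ = controller.UsernameHeaders()
}
```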
+*/ + +package union + +import ( + "net/http" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +// unionAuthRequestHandler authenticates requests using a chain of authenticator.Requests +type unionAuthRequestHandler struct { + // Handlers is a chain of request authenticators to delegate to + Handlers []authenticator.Request + // FailOnError determines whether an error returns short-circuits the chain + FailOnError bool +} + +// New returns a request authenticator that validates credentials using a chain of authenticator.Request objects. +// The entire chain is tried until one succeeds. If all fail, an aggregate error is returned. +func New(authRequestHandlers ...authenticator.Request) authenticator.Request { + if len(authRequestHandlers) == 1 { + return authRequestHandlers[0] + } + return &unionAuthRequestHandler{Handlers: authRequestHandlers, FailOnError: false} +} + +// NewFailOnError returns a request authenticator that validates credentials using a chain of authenticator.Request objects. +// The first error short-circuits the chain. +func NewFailOnError(authRequestHandlers ...authenticator.Request) authenticator.Request { + if len(authRequestHandlers) == 1 { + return authRequestHandlers[0] + } + return &unionAuthRequestHandler{Handlers: authRequestHandlers, FailOnError: true} +} + +// AuthenticateRequest authenticates the request using a chain of authenticator.Request objects. +func (authHandler *unionAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + var errlist []error + for _, currAuthRequestHandler := range authHandler.Handlers { + resp, ok, err := currAuthRequestHandler.AuthenticateRequest(req) + if err != nil { + if authHandler.FailOnError { + return resp, ok, err + } + errlist = append(errlist, err) + continue + } + + if ok { + return resp, ok, err + } + } + + return nil, false, utilerrors.NewAggregate(errlist) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go new file mode 100644 index 0000000000..11afa84cbd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go @@ -0,0 +1,108 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package websocket + +import ( + "encoding/base64" + "errors" + "net/http" + "net/textproto" + "strings" + "unicode/utf8" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/util/wsstream" +) + +const bearerProtocolPrefix = "base64url.bearer.authorization.k8s.io." + +var protocolHeader = textproto.CanonicalMIMEHeaderKey("Sec-WebSocket-Protocol") + +var errInvalidToken = errors.New("invalid bearer token") + +// ProtocolAuthenticator allows a websocket connection to provide a bearer token as a subprotocol +// in the format "base64url.bearer.authorization." 
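For illustration, a sketch of chaining request authenticators with the union handler above. The X-Api-Key handler is invented for this example; the anonymous authenticator (vendored earlier in this patch) must come last because it always succeeds.

```go
package main

import (
	"fmt"
	"net/http"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/request/anonymous"
	unionauth "k8s.io/apiserver/pkg/authentication/request/union"
	"k8s.io/apiserver/pkg/authentication/user"
)

func main() {
	// Only recognizes requests carrying a particular (made-up) header.
	apiKey := authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
		if req.Header.Get("X-Api-Key") != "letmein" {
			return nil, false, nil // no opinion; fall through to the next handler
		}
		return &authenticator.Response{User: &user.DefaultInfo{Name: "api-key-user"}}, true, nil
	})

	// Anonymous always succeeds, so it terminates the chain.
	auth := unionauth.New(apiKey, anonymous.NewAuthenticator())

	req, _ := http.NewRequest("GET", "https://example.invalid/", nil)
	resp, ok, err := auth.AuthenticateRequest(req)
	fmt.Println(ok, err, resp.User.GetName()) // true <nil> system:anonymous
}
```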
+type ProtocolAuthenticator struct { + // auth is the token authenticator to use to validate the token + auth authenticator.Token +} + +func NewProtocolAuthenticator(auth authenticator.Token) *ProtocolAuthenticator { + return &ProtocolAuthenticator{auth} +} + +func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + // Only accept websocket connections + if !wsstream.IsWebSocketRequest(req) { + return nil, false, nil + } + + token := "" + sawTokenProtocol := false + filteredProtocols := []string{} + for _, protocolHeader := range req.Header[protocolHeader] { + for _, protocol := range strings.Split(protocolHeader, ",") { + protocol = strings.TrimSpace(protocol) + + if !strings.HasPrefix(protocol, bearerProtocolPrefix) { + filteredProtocols = append(filteredProtocols, protocol) + continue + } + + if sawTokenProtocol { + return nil, false, errors.New("multiple base64.bearer.authorization tokens specified") + } + sawTokenProtocol = true + + encodedToken := strings.TrimPrefix(protocol, bearerProtocolPrefix) + decodedToken, err := base64.RawURLEncoding.DecodeString(encodedToken) + if err != nil { + return nil, false, errors.New("invalid base64.bearer.authorization token encoding") + } + if !utf8.Valid(decodedToken) { + return nil, false, errors.New("invalid base64.bearer.authorization token") + } + token = string(decodedToken) + } + } + + // Must pass at least one other subprotocol so that we can remove the one containing the bearer token, + // and there is at least one to echo back to the client + if len(token) > 0 && len(filteredProtocols) == 0 { + return nil, false, errors.New("missing additional subprotocol") + } + + if len(token) == 0 { + return nil, false, nil + } + + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) + + // on success, remove the protocol with the token + if ok { + // https://tools.ietf.org/html/rfc6455#section-11.3.4 indicates the Sec-WebSocket-Protocol header may appear multiple times + // in a request, and is logically the same as a single Sec-WebSocket-Protocol header field that contains all values + req.Header.Set(protocolHeader, strings.Join(filteredProtocols, ",")) + } + + // If the token authenticator didn't error, provide a default error + if !ok && err == nil { + err = errInvalidToken + } + + return resp, ok, err +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS new file mode 100644 index 0000000000..3cf0364383 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go new file mode 100644 index 0000000000..8f3d36b66d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package x509 provides a request authenticator that validates and +// extracts user information from client certificates +package x509 // import "k8s.io/apiserver/pkg/authentication/request/x509" diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go new file mode 100644 index 0000000000..462eb4cc95 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go @@ -0,0 +1,71 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package x509 + +import ( + "crypto/x509" + "fmt" + + "k8s.io/client-go/util/cert" +) + +// StaticVerifierFn is a VerifyOptionFunc that always returns the same value. This allows verify options that cannot change. +func StaticVerifierFn(opts x509.VerifyOptions) VerifyOptionFunc { + return func() (x509.VerifyOptions, bool) { + return opts, true + } +} + +// NewStaticVerifierFromFile creates a new verification func from a file. It reads the content and then fails. +// It will return a nil function if you pass an empty CA file. +func NewStaticVerifierFromFile(clientCA string) (VerifyOptionFunc, error) { + if len(clientCA) == 0 { + return nil, nil + } + + // Wrap with an x509 verifier + var err error + opts := DefaultVerifyOptions() + opts.Roots, err = cert.NewPool(clientCA) + if err != nil { + return nil, fmt.Errorf("error loading certs from %s: %v", clientCA, err) + } + + return StaticVerifierFn(opts), nil +} + +// StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places. +type StringSliceProvider interface { + // Value returns the current string slice. Callers should never mutate the returned value. + Value() []string +} + +// StringSliceProviderFunc is a function that matches the StringSliceProvider interface +type StringSliceProviderFunc func() []string + +// Value returns the current string slice. Callers should never mutate the returned value. +func (d StringSliceProviderFunc) Value() []string { + return d() +} + +// StaticStringSlice a StringSliceProvider that returns a fixed value +type StaticStringSlice []string + +// Value returns the current string slice. Callers should never mutate the returned value. 
+func (s StaticStringSlice) Value() []string { + return s +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go new file mode 100644 index 0000000000..0116391409 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -0,0 +1,258 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package x509 + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "fmt" + "net/http" + "strings" + "time" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, the following metric is defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var clientCertificateExpirationHistogram = metrics.NewHistogram( + &metrics.HistogramOpts{ + Namespace: "apiserver", + Subsystem: "client", + Name: "certificate_expiration_seconds", + Help: "Distribution of the remaining lifetime on the certificate used to authenticate a request.", + Buckets: []float64{ + 0, + (30 * time.Minute).Seconds(), + (1 * time.Hour).Seconds(), + (2 * time.Hour).Seconds(), + (6 * time.Hour).Seconds(), + (12 * time.Hour).Seconds(), + (24 * time.Hour).Seconds(), + (2 * 24 * time.Hour).Seconds(), + (4 * 24 * time.Hour).Seconds(), + (7 * 24 * time.Hour).Seconds(), + (30 * 24 * time.Hour).Seconds(), + (3 * 30 * 24 * time.Hour).Seconds(), + (6 * 30 * 24 * time.Hour).Seconds(), + (12 * 30 * 24 * time.Hour).Seconds(), + }, + StabilityLevel: metrics.ALPHA, + }, +) + +func init() { + legacyregistry.MustRegister(clientCertificateExpirationHistogram) +} + +// UserConversion defines an interface for extracting user info from a client certificate chain +type UserConversion interface { + User(chain []*x509.Certificate) (*authenticator.Response, bool, error) +} + +// UserConversionFunc is a function that implements the UserConversion interface. 
+type UserConversionFunc func(chain []*x509.Certificate) (*authenticator.Response, bool, error) + +// User implements x509.UserConversion +func (f UserConversionFunc) User(chain []*x509.Certificate) (*authenticator.Response, bool, error) { + return f(chain) +} + +func columnSeparatedHex(d []byte) string { + h := strings.ToUpper(hex.EncodeToString(d)) + var sb strings.Builder + for i, r := range h { + sb.WriteRune(r) + if i%2 == 1 && i != len(h)-1 { + sb.WriteRune(':') + } + } + return sb.String() +} + +func certificateIdentifier(c *x509.Certificate) string { + return fmt.Sprintf( + "SN=%d, SKID=%s, AKID=%s", + c.SerialNumber, + columnSeparatedHex(c.SubjectKeyId), + columnSeparatedHex(c.AuthorityKeyId), + ) +} + +// VerifyOptionFunc is function which provides a shallow copy of the VerifyOptions to the authenticator. This allows +// for cases where the options (particularly the CAs) can change. If the bool is false, then the returned VerifyOptions +// are ignored and the authenticator will express "no opinion". This allows a clear signal for cases where a CertPool +// is eventually expected, but not currently present. +type VerifyOptionFunc func() (x509.VerifyOptions, bool) + +// Authenticator implements request.Authenticator by extracting user info from verified client certificates +type Authenticator struct { + verifyOptionsFn VerifyOptionFunc + user UserConversion +} + +// New returns a request.Authenticator that verifies client certificates using the provided +// VerifyOptions, and converts valid certificate chains into user.Info using the provided UserConversion +func New(opts x509.VerifyOptions, user UserConversion) *Authenticator { + return NewDynamic(StaticVerifierFn(opts), user) +} + +// NewDynamic returns a request.Authenticator that verifies client certificates using the provided +// VerifyOptionFunc (which may be dynamic), and converts valid certificate chains into user.Info using the provided UserConversion +func NewDynamic(verifyOptionsFn VerifyOptionFunc, user UserConversion) *Authenticator { + return &Authenticator{verifyOptionsFn, user} +} + +// AuthenticateRequest authenticates the request using presented client certificates +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { + return nil, false, nil + } + + // Use intermediates, if provided + optsCopy, ok := a.verifyOptionsFn() + // if there are intentionally no verify options, then we cannot authenticate this request + if !ok { + return nil, false, nil + } + if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 { + optsCopy.Intermediates = x509.NewCertPool() + for _, intermediate := range req.TLS.PeerCertificates[1:] { + optsCopy.Intermediates.AddCert(intermediate) + } + } + + remaining := req.TLS.PeerCertificates[0].NotAfter.Sub(time.Now()) + clientCertificateExpirationHistogram.Observe(remaining.Seconds()) + chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy) + if err != nil { + return nil, false, fmt.Errorf( + "verifying certificate %s failed: %w", + certificateIdentifier(req.TLS.PeerCertificates[0]), + err, + ) + } + + var errlist []error + for _, chain := range chains { + user, ok, err := a.user.User(chain) + if err != nil { + errlist = append(errlist, err) + continue + } + + if ok { + return user, ok, err + } + } + return nil, false, utilerrors.NewAggregate(errlist) +} + +// Verifier implements request.Authenticator by verifying a client cert on the request, then delegating to the 
wrapped auth +type Verifier struct { + verifyOptionsFn VerifyOptionFunc + auth authenticator.Request + + // allowedCommonNames contains the common names which a verified certificate is allowed to have. + // If empty, all verified certificates are allowed. + allowedCommonNames StringSliceProvider +} + +// NewVerifier create a request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth +func NewVerifier(opts x509.VerifyOptions, auth authenticator.Request, allowedCommonNames sets.String) authenticator.Request { + return NewDynamicCAVerifier(StaticVerifierFn(opts), auth, StaticStringSlice(allowedCommonNames.List())) +} + +// NewDynamicCAVerifier create a request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth +func NewDynamicCAVerifier(verifyOptionsFn VerifyOptionFunc, auth authenticator.Request, allowedCommonNames StringSliceProvider) authenticator.Request { + return &Verifier{verifyOptionsFn, auth, allowedCommonNames} +} + +// AuthenticateRequest verifies the presented client certificate, then delegates to the wrapped auth +func (a *Verifier) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { + return nil, false, nil + } + + // Use intermediates, if provided + optsCopy, ok := a.verifyOptionsFn() + // if there are intentionally no verify options, then we cannot authenticate this request + if !ok { + return nil, false, nil + } + if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 { + optsCopy.Intermediates = x509.NewCertPool() + for _, intermediate := range req.TLS.PeerCertificates[1:] { + optsCopy.Intermediates.AddCert(intermediate) + } + } + + if _, err := req.TLS.PeerCertificates[0].Verify(optsCopy); err != nil { + return nil, false, err + } + if err := a.verifySubject(req.TLS.PeerCertificates[0].Subject); err != nil { + return nil, false, err + } + return a.auth.AuthenticateRequest(req) +} + +func (a *Verifier) verifySubject(subject pkix.Name) error { + // No CN restrictions + if len(a.allowedCommonNames.Value()) == 0 { + return nil + } + // Enforce CN restrictions + for _, allowedCommonName := range a.allowedCommonNames.Value() { + if allowedCommonName == subject.CommonName { + return nil + } + } + return fmt.Errorf("x509: subject with cn=%s is not in the allowed list", subject.CommonName) +} + +// DefaultVerifyOptions returns VerifyOptions that use the system root certificates, current time, +// and requires certificates to be valid for client auth (x509.ExtKeyUsageClientAuth) +func DefaultVerifyOptions() x509.VerifyOptions { + return x509.VerifyOptions{ + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +// CommonNameUserConversion builds user info from a certificate chain using the subject's CommonName +var CommonNameUserConversion = UserConversionFunc(func(chain []*x509.Certificate) (*authenticator.Response, bool, error) { + if len(chain[0].Subject.CommonName) == 0 { + return nil, false, nil + } + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: chain[0].Subject.CommonName, + Groups: chain[0].Subject.Organization, + }, + }, true, nil +}) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go new file mode 100644 index 0000000000..f0dc076763 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -0,0 +1,183 @@ +/* +Copyright 2014 The 
Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaccount + +import ( + "context" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/user" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + + "k8s.io/klog/v2" +) + +const ( + ServiceAccountUsernamePrefix = "system:serviceaccount:" + ServiceAccountUsernameSeparator = ":" + ServiceAccountGroupPrefix = "system:serviceaccounts:" + AllServiceAccountsGroup = "system:serviceaccounts" + // PodNameKey is the key used in a user's "extra" to specify the pod name of + // the authenticating request. + PodNameKey = "authentication.kubernetes.io/pod-name" + // PodUIDKey is the key used in a user's "extra" to specify the pod UID of + // the authenticating request. + PodUIDKey = "authentication.kubernetes.io/pod-uid" +) + +// MakeUsername generates a username from the given namespace and ServiceAccount name. +// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name. +func MakeUsername(namespace, name string) string { + return ServiceAccountUsernamePrefix + namespace + ServiceAccountUsernameSeparator + name +} + +// MatchesUsername checks whether the provided username matches the namespace and name without +// allocating. Use this when checking a service account namespace and name against a known string. 
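Illustrative sketch (not part of the vendored file): a minimal example of how the username helpers in this file (MakeUsername, SplitUsername, MatchesUsername) compose, assuming the import path k8s.io/apiserver/pkg/authentication/serviceaccount shown in the diff header; the namespace and name values are arbitrary.

    package main

    import (
        "fmt"

        "k8s.io/apiserver/pkg/authentication/serviceaccount"
    )

    func main() {
        // Build the canonical username for a service account.
        name := serviceaccount.MakeUsername("kube-system", "default")
        fmt.Println(name) // system:serviceaccount:kube-system:default

        // Recover the namespace and name, validating both parts.
        ns, sa, err := serviceaccount.SplitUsername(name)
        fmt.Println(ns, sa, err) // kube-system default <nil>

        // Compare against a known namespace/name without allocating.
        fmt.Println(serviceaccount.MatchesUsername("kube-system", "default", name)) // true
    }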
+func MatchesUsername(namespace, name string, username string) bool { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return false + } + username = username[len(ServiceAccountUsernamePrefix):] + + if !strings.HasPrefix(username, namespace) { + return false + } + username = username[len(namespace):] + + if !strings.HasPrefix(username, ServiceAccountUsernameSeparator) { + return false + } + username = username[len(ServiceAccountUsernameSeparator):] + + return username == name +} + +var invalidUsernameErr = fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name")) + +// SplitUsername returns the namespace and ServiceAccount name embedded in the given username, +// or an error if the username is not a valid name produced by MakeUsername +func SplitUsername(username string) (string, string, error) { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return "", "", invalidUsernameErr + } + trimmed := strings.TrimPrefix(username, ServiceAccountUsernamePrefix) + parts := strings.Split(trimmed, ServiceAccountUsernameSeparator) + if len(parts) != 2 { + return "", "", invalidUsernameErr + } + namespace, name := parts[0], parts[1] + if len(apimachineryvalidation.ValidateNamespaceName(namespace, false)) != 0 { + return "", "", invalidUsernameErr + } + if len(apimachineryvalidation.ValidateServiceAccountName(name, false)) != 0 { + return "", "", invalidUsernameErr + } + return namespace, name, nil +} + +// MakeGroupNames generates service account group names for the given namespace +func MakeGroupNames(namespace string) []string { + return []string{ + AllServiceAccountsGroup, + MakeNamespaceGroupName(namespace), + } +} + +// MakeNamespaceGroupName returns the name of the group all service accounts in the namespace are included in +func MakeNamespaceGroupName(namespace string) string { + return ServiceAccountGroupPrefix + namespace +} + +// UserInfo returns a user.Info interface for the given namespace, service account name and UID +func UserInfo(namespace, name, uid string) user.Info { + return (&ServiceAccountInfo{ + Name: name, + Namespace: namespace, + UID: uid, + }).UserInfo() +} + +type ServiceAccountInfo struct { + Name, Namespace, UID string + PodName, PodUID string +} + +func (sa *ServiceAccountInfo) UserInfo() user.Info { + info := &user.DefaultInfo{ + Name: MakeUsername(sa.Namespace, sa.Name), + UID: sa.UID, + Groups: MakeGroupNames(sa.Namespace), + } + if sa.PodName != "" && sa.PodUID != "" { + info.Extra = map[string][]string{ + PodNameKey: {sa.PodName}, + PodUIDKey: {sa.PodUID}, + } + } + return info +} + +// IsServiceAccountToken returns true if the secret is a valid api token for the service account +func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { + if secret.Type != v1.SecretTypeServiceAccountToken { + return false + } + + name := secret.Annotations[v1.ServiceAccountNameKey] + uid := secret.Annotations[v1.ServiceAccountUIDKey] + if name != sa.Name { + // Name must match + return false + } + if len(uid) > 0 && uid != string(sa.UID) { + // If UID is specified, it must match + return false + } + + return true +} + +func GetOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) { + sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return sa, nil + } + if !apierrors.IsNotFound(err) { + return nil, err + } + + // Create the namespace if we can't verify it exists. 
+ // Tolerate errors, since we don't know whether this component has namespace creation permissions. + if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { + if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { + klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) + } + } + + // Create the service account + sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + // If we're racing to init and someone else already created it, re-fetch + return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + } + return sa, err +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go new file mode 100644 index 0000000000..8e0520af16 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + utilcache "k8s.io/apimachinery/pkg/util/cache" + "k8s.io/apimachinery/pkg/util/clock" +) + +type simpleCache struct { + cache *utilcache.Expiring +} + +func newSimpleCache(clock clock.Clock) cache { + return &simpleCache{cache: utilcache.NewExpiringWithClock(clock)} +} + +func (c *simpleCache) get(key string) (*cacheRecord, bool) { + record, ok := c.cache.Get(key) + if !ok { + return nil, false + } + value, ok := record.(*cacheRecord) + return value, ok +} + +func (c *simpleCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.cache.Set(key, value, ttl) +} + +func (c *simpleCache) remove(key string) { + c.cache.Delete(key) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go new file mode 100644 index 0000000000..e5b7afe4e7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "hash/fnv" + "time" +) + +// split cache lookups across N striped caches +type stripedCache struct { + stripeCount uint32 + hashFunc func(string) uint32 + caches []cache +} + +type hashFunc func(string) uint32 +type newCacheFunc func() cache + +func newStripedCache(stripeCount int, hash hashFunc, newCacheFunc newCacheFunc) cache { + caches := []cache{} + for i := 0; i < stripeCount; i++ { + caches = append(caches, newCacheFunc()) + } + return &stripedCache{ + stripeCount: uint32(stripeCount), + hashFunc: hash, + caches: caches, + } +} + +func (c *stripedCache) get(key string) (*cacheRecord, bool) { + return c.caches[c.hashFunc(key)%c.stripeCount].get(key) +} +func (c *stripedCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.caches[c.hashFunc(key)%c.stripeCount].set(key, value, ttl) +} +func (c *stripedCache) remove(key string) { + c.caches[c.hashFunc(key)%c.stripeCount].remove(key) +} + +func fnvHashFunc(key string) uint32 { + f := fnv.New32() + f.Write([]byte(key)) + return f.Sum32() +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go new file mode 100644 index 0000000000..a10564f04d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -0,0 +1,272 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "errors" + "hash" + "io" + "runtime" + "sync" + "time" + "unsafe" + + "golang.org/x/sync/singleflight" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilclock "k8s.io/apimachinery/pkg/util/clock" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/klog/v2" +) + +var errAuthnCrash = apierrors.NewInternalError(errors.New("authentication failed unexpectedly")) + +const sharedLookupTimeout = 30 * time.Second + +// cacheRecord holds the three return values of the authenticator.Token AuthenticateToken method +type cacheRecord struct { + resp *authenticator.Response + ok bool + err error + + // this cache assumes token authn has no side-effects or temporal dependence. + // neither of these are true for audit annotations set via AddAuditAnnotation. + // + // for audit annotations, the assumption is that for some period of time (cache TTL), + // all requests with the same API audiences and the same bearer token result in the + // same annotations. This may not be true if the authenticator sets an annotation + // based on the current time, but that may be okay since cache TTLs are generally + // small (seconds). 
+ annotations map[string]string +} + +type cachedTokenAuthenticator struct { + authenticator authenticator.Token + + cacheErrs bool + successTTL time.Duration + failureTTL time.Duration + + cache cache + group singleflight.Group + + // hashPool is a per authenticator pool of hash.Hash (to avoid allocations from building the Hash) + // HMAC with SHA-256 and a random key is used to prevent precomputation and length extension attacks + // It also mitigates hash map DOS attacks via collisions (the inputs are supplied by untrusted users) + hashPool *sync.Pool +} + +type cache interface { + // given a key, return the record, and whether or not it existed + get(key string) (value *cacheRecord, exists bool) + // caches the record for the key + set(key string, value *cacheRecord, ttl time.Duration) + // removes the record for the key + remove(key string) +} + +// New returns a token authenticator that caches the results of the specified authenticator. A ttl of 0 bypasses the cache. +func New(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration) authenticator.Token { + return newWithClock(authenticator, cacheErrs, successTTL, failureTTL, utilclock.RealClock{}) +} + +func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration, clock utilclock.Clock) authenticator.Token { + randomCacheKey := make([]byte, 32) + if _, err := rand.Read(randomCacheKey); err != nil { + panic(err) // rand should never fail + } + + return &cachedTokenAuthenticator{ + authenticator: authenticator, + cacheErrs: cacheErrs, + successTTL: successTTL, + failureTTL: failureTTL, + // Cache performance degrades noticeably when the number of + // tokens in operation exceeds the size of the cache. It is + // cheap to make the cache big in the second dimension below, + // the memory is only consumed when that many tokens are being + // used. Currently we advertise support 5k nodes and 10k + // namespaces; a 32k entry cache is therefore a 2x safety + // margin. + cache: newStripedCache(32, fnvHashFunc, func() cache { return newSimpleCache(clock) }), + + hashPool: &sync.Pool{ + New: func() interface{} { + return hmac.New(sha256.New, randomCacheKey) + }, + }, + } +} + +// AuthenticateToken implements authenticator.Token +func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + record := a.doAuthenticateToken(ctx, token) + if !record.ok || record.err != nil { + return nil, false, record.err + } + for key, value := range record.annotations { + audit.AddAuditAnnotation(ctx, key, value) + } + return record.resp, true, nil +} + +func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, token string) *cacheRecord { + doneAuthenticating := stats.authenticating() + + auds, audsOk := authenticator.AudiencesFrom(ctx) + + key := keyFunc(a.hashPool, auds, token) + if record, ok := a.cache.get(key); ok { + // Record cache hit + doneAuthenticating(true) + return record + } + + // Record cache miss + doneBlocking := stats.blocking() + defer doneBlocking() + defer doneAuthenticating(false) + + c := a.group.DoChan(key, func() (val interface{}, _ error) { + // always use one place to read and write the output of AuthenticateToken + record := &cacheRecord{} + + doneFetching := stats.fetching() + // We're leaving the request handling stack so we need to handle crashes + // ourselves. Log a stack trace and return a 500 if something panics. 
+ defer func() { + if r := recover(); r != nil { + // make sure to always return a record + record.err = errAuthnCrash + val = record + + // Same as stdlib http server code. Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + klog.Errorf("%v\n%s", r, buf) + } + doneFetching(record.err == nil) + }() + + // Check again for a cached record. We may have raced with a fetch. + if record, ok := a.cache.get(key); ok { + return record, nil + } + + // Detach the context because the lookup may be shared by multiple callers, + // however propagate the audience. + ctx, cancel := context.WithTimeout(context.Background(), sharedLookupTimeout) + defer cancel() + + if audsOk { + ctx = authenticator.WithAudiences(ctx, auds) + } + + // since this is shared work between multiple requests, we have no way of knowing if any + // particular request supports audit annotations. thus we always attempt to record them. + ev := &auditinternal.Event{Level: auditinternal.LevelMetadata} + ctx = request.WithAuditEvent(ctx, ev) + + record.resp, record.ok, record.err = a.authenticator.AuthenticateToken(ctx, token) + record.annotations = ev.Annotations + + if !a.cacheErrs && record.err != nil { + return record, nil + } + + switch { + case record.ok && a.successTTL > 0: + a.cache.set(key, record, a.successTTL) + case !record.ok && a.failureTTL > 0: + a.cache.set(key, record, a.failureTTL) + } + + return record, nil + }) + + select { + case result := <-c: + // we always set Val and never set Err + return result.Val.(*cacheRecord) + case <-ctx.Done(): + // fake a record on context cancel + return &cacheRecord{err: ctx.Err()} + } +} + +// keyFunc generates a string key by hashing the inputs. +// This lowers the memory requirement of the cache and keeps tokens out of memory. +func keyFunc(hashPool *sync.Pool, auds []string, token string) string { + h := hashPool.Get().(hash.Hash) + + h.Reset() + + // try to force stack allocation + var a [4]byte + b := a[:] + + writeLengthPrefixedString(h, b, token) + // encode the length of audiences to avoid ambiguities + writeLength(h, b, len(auds)) + for _, aud := range auds { + writeLengthPrefixedString(h, b, aud) + } + + key := toString(h.Sum(nil)) // skip base64 encoding to save an allocation + + hashPool.Put(h) + + return key +} + +// writeLengthPrefixedString writes s with a length prefix to prevent ambiguities, i.e. 
"xy" + "z" == "x" + "yz" +// the length of b is assumed to be 4 (b is mutated by this function to store the length of s) +func writeLengthPrefixedString(w io.Writer, b []byte, s string) { + writeLength(w, b, len(s)) + if _, err := w.Write(toBytes(s)); err != nil { + panic(err) // Write() on hash never fails + } +} + +// writeLength encodes length into b and then writes it via the given writer +// the length of b is assumed to be 4 +func writeLength(w io.Writer, b []byte, length int) { + binary.BigEndian.PutUint32(b, uint32(length)) + if _, err := w.Write(b); err != nil { + panic(err) // Write() on hash never fails + } +} + +// toBytes performs unholy acts to avoid allocations +func toBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&s)) +} + +// toString performs unholy acts to avoid allocations +func toString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go new file mode 100644 index 0000000000..dbe745718e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go @@ -0,0 +1,125 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +var ( + requestLatency = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "request_duration_seconds", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) + requestCount = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "request_total", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) + fetchCount = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "fetch_total", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) + activeFetchCount = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "active_fetch_count", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) +) + +func init() { + legacyregistry.MustRegister( + requestLatency, + requestCount, + fetchCount, + activeFetchCount, + ) +} + +const ( + hitTag = "hit" + missTag = "miss" + + fetchFailedTag = "error" + fetchOkTag = "ok" + + fetchInFlightTag = "in_flight" + fetchBlockedTag = "blocked" +) + +type statsCollector struct{} + +var stats = statsCollector{} + +func (statsCollector) authenticating() func(hit bool) { + start := time.Now() + return func(hit bool) { + var tag string + if hit { + tag = hitTag + } else { + tag = missTag + } + + latency := time.Since(start) + + requestCount.WithLabelValues(tag).Inc() + requestLatency.WithLabelValues(tag).Observe(float64(latency.Milliseconds()) / 1000) + } +} + +func (statsCollector) blocking() func() { + 
activeFetchCount.WithLabelValues(fetchBlockedTag).Inc() + return activeFetchCount.WithLabelValues(fetchBlockedTag).Dec +} + +func (statsCollector) fetching() func(ok bool) { + activeFetchCount.WithLabelValues(fetchInFlightTag).Inc() + return func(ok bool) { + var tag string + if ok { + tag = fetchOkTag + } else { + tag = fetchFailedTag + } + + fetchCount.WithLabelValues(tag).Inc() + + activeFetchCount.WithLabelValues(fetchInFlightTag).Dec() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go new file mode 100644 index 0000000000..bd7fccbdca --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tokenfile + +import ( + "context" + "encoding/csv" + "fmt" + "io" + "os" + "strings" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/klog/v2" +) + +type TokenAuthenticator struct { + tokens map[string]*user.DefaultInfo +} + +// New returns a TokenAuthenticator for a single token +func New(tokens map[string]*user.DefaultInfo) *TokenAuthenticator { + return &TokenAuthenticator{ + tokens: tokens, + } +} + +// NewCSV returns a TokenAuthenticator, populated from a CSV file. 
+// The CSV file must contain records in the format "token,username,useruid" +func NewCSV(path string) (*TokenAuthenticator, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + recordNum := 0 + tokens := make(map[string]*user.DefaultInfo) + reader := csv.NewReader(file) + reader.FieldsPerRecord = -1 + for { + record, err := reader.Read() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if len(record) < 3 { + return nil, fmt.Errorf("token file '%s' must have at least 3 columns (token, user name, user uid), found %d", path, len(record)) + } + + recordNum++ + if record[0] == "" { + klog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) + continue + } + + obj := &user.DefaultInfo{ + Name: record[1], + UID: record[2], + } + if _, exist := tokens[record[0]]; exist { + klog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) + } + tokens[record[0]] = obj + + if len(record) >= 4 { + obj.Groups = strings.Split(record[3], ",") + } + } + + return &TokenAuthenticator{ + tokens: tokens, + }, nil +} + +func (a *TokenAuthenticator) AuthenticateToken(ctx context.Context, value string) (*authenticator.Response, bool, error) { + user, ok := a.tokens[value] + if !ok { + return nil, false, nil + } + return &authenticator.Response{User: user}, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go new file mode 100644 index 0000000000..3d87fd72ca --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package user contains utilities for dealing with simple user exchange in the auth +// packages. The user.Info interface defines an interface for exchanging that info. +package user // import "k8s.io/apiserver/pkg/authentication/user" diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go new file mode 100644 index 0000000000..4d6ec09800 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go @@ -0,0 +1,84 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user + +// Info describes a user that has been authenticated to the system. 
+type Info interface { + // GetName returns the name that uniquely identifies this user among all + // other active users. + GetName() string + // GetUID returns a unique value for a particular user that will change + // if the user is removed from the system and another user is added with + // the same name. + GetUID() string + // GetGroups returns the names of the groups the user is a member of + GetGroups() []string + + // GetExtra can contain any additional information that the authenticator + // thought was interesting. One example would be scopes on a token. + // Keys in this map should be namespaced to the authenticator or + // authenticator/authorizer pair making use of them. + // For instance: "example.org/foo" instead of "foo" + // This is a map[string][]string because it needs to be serializeable into + // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization + // delegation flows + // In order to faithfully round-trip through an impersonation flow, these keys + // MUST be lowercase. + GetExtra() map[string][]string +} + +// DefaultInfo provides a simple user information exchange object +// for components that implement the UserInfo interface. +type DefaultInfo struct { + Name string + UID string + Groups []string + Extra map[string][]string +} + +func (i *DefaultInfo) GetName() string { + return i.Name +} + +func (i *DefaultInfo) GetUID() string { + return i.UID +} + +func (i *DefaultInfo) GetGroups() []string { + return i.Groups +} + +func (i *DefaultInfo) GetExtra() map[string][]string { + return i.Extra +} + +// well-known user and group names +const ( + SystemPrivilegedGroup = "system:masters" + NodesGroup = "system:nodes" + MonitoringGroup = "system:monitoring" + AllUnauthenticated = "system:unauthenticated" + AllAuthenticated = "system:authenticated" + + Anonymous = "system:anonymous" + APIServerUser = "system:apiserver" + + // core kubernetes process identities + KubeProxy = "system:kube-proxy" + KubeControllerManager = "system:kube-controller-manager" + KubeScheduler = "system:kube-scheduler" +) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go new file mode 100644 index 0000000000..ce70710fa3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -0,0 +1,159 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorizer + +import ( + "context" + "net/http" + + "k8s.io/apiserver/pkg/authentication/user" +) + +// Attributes is an interface used by an Authorizer to get information about a request +// that is used to make an authorization decision. 
+type Attributes interface { + // GetUser returns the user.Info object to authorize + GetUser() user.Info + + // GetVerb returns the kube verb associated with API requests (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy), + // or the lowercased HTTP verb associated with non-API requests (this includes get, put, post, patch, and delete) + GetVerb() string + + // When IsReadOnly() == true, the request has no side effects, other than + // caching, logging, and other incidentals. + IsReadOnly() bool + + // The namespace of the object, if a request is for a REST object. + GetNamespace() string + + // The kind of object, if a request is for a REST object. + GetResource() string + + // GetSubresource returns the subresource being requested, if present + GetSubresource() string + + // GetName returns the name of the object as parsed off the request. This will not be present for all request types, but + // will be present for: get, update, delete + GetName() string + + // The group of the resource, if a request is for a REST object. + GetAPIGroup() string + + // GetAPIVersion returns the version of the group requested, if a request is for a REST object. + GetAPIVersion() string + + // IsResourceRequest returns true for requests to API resources, like /api/v1/nodes, + // and false for non-resource endpoints like /api, /healthz + IsResourceRequest() bool + + // GetPath returns the path of the request + GetPath() string +} + +// Authorizer makes an authorization decision based on information gained by making +// zero or more calls to methods of the Attributes interface. It returns nil when an action is +// authorized, otherwise it returns an error. +type Authorizer interface { + Authorize(ctx context.Context, a Attributes) (authorized Decision, reason string, err error) +} + +type AuthorizerFunc func(a Attributes) (Decision, string, error) + +func (f AuthorizerFunc) Authorize(ctx context.Context, a Attributes) (Decision, string, error) { + return f(a) +} + +// RuleResolver provides a mechanism for resolving the list of rules that apply to a given user within a namespace. +type RuleResolver interface { + // RulesFor get the list of cluster wide rules, the list of rules in the specific namespace, incomplete status and errors. + RulesFor(user user.Info, namespace string) ([]ResourceRuleInfo, []NonResourceRuleInfo, bool, error) +} + +// RequestAttributesGetter provides a function that extracts Attributes from an http.Request +type RequestAttributesGetter interface { + GetRequestAttributes(user.Info, *http.Request) Attributes +} + +// AttributesRecord implements Attributes interface. 
+type AttributesRecord struct { + User user.Info + Verb string + Namespace string + APIGroup string + APIVersion string + Resource string + Subresource string + Name string + ResourceRequest bool + Path string +} + +func (a AttributesRecord) GetUser() user.Info { + return a.User +} + +func (a AttributesRecord) GetVerb() string { + return a.Verb +} + +func (a AttributesRecord) IsReadOnly() bool { + return a.Verb == "get" || a.Verb == "list" || a.Verb == "watch" +} + +func (a AttributesRecord) GetNamespace() string { + return a.Namespace +} + +func (a AttributesRecord) GetResource() string { + return a.Resource +} + +func (a AttributesRecord) GetSubresource() string { + return a.Subresource +} + +func (a AttributesRecord) GetName() string { + return a.Name +} + +func (a AttributesRecord) GetAPIGroup() string { + return a.APIGroup +} + +func (a AttributesRecord) GetAPIVersion() string { + return a.APIVersion +} + +func (a AttributesRecord) IsResourceRequest() bool { + return a.ResourceRequest +} + +func (a AttributesRecord) GetPath() string { + return a.Path +} + +type Decision int + +const ( + // DecisionDeny means that an authorizer decided to deny the action. + DecisionDeny Decision = iota + // DecisionAllow means that an authorizer decided to allow the action. + DecisionAllow + // DecisionNoOpionion means that an authorizer has no opinion on whether + // to allow or deny an action. + DecisionNoOpinion +) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go new file mode 100644 index 0000000000..8f7d9d9eff --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorizer + +type ResourceRuleInfo interface { + // GetVerbs returns a list of kubernetes resource API verbs. + GetVerbs() []string + // GetAPIGroups return the names of the APIGroup that contains the resources. + GetAPIGroups() []string + // GetResources return a list of resources the rule applies to. + GetResources() []string + // GetResourceNames return a white list of names that the rule applies to. + GetResourceNames() []string +} + +// DefaultResourceRuleInfo holds information that describes a rule for the resource +type DefaultResourceRuleInfo struct { + Verbs []string + APIGroups []string + Resources []string + ResourceNames []string +} + +func (i *DefaultResourceRuleInfo) GetVerbs() []string { + return i.Verbs +} + +func (i *DefaultResourceRuleInfo) GetAPIGroups() []string { + return i.APIGroups +} + +func (i *DefaultResourceRuleInfo) GetResources() []string { + return i.Resources +} + +func (i *DefaultResourceRuleInfo) GetResourceNames() []string { + return i.ResourceNames +} + +type NonResourceRuleInfo interface { + // GetVerbs returns a list of kubernetes resource API verbs. + GetVerbs() []string + // GetNonResourceURLs return a set of partial urls that a user should have access to. 
+ GetNonResourceURLs() []string +} + +// DefaultNonResourceRuleInfo holds information that describes a rule for the non-resource +type DefaultNonResourceRuleInfo struct { + Verbs []string + NonResourceURLs []string +} + +func (i *DefaultNonResourceRuleInfo) GetVerbs() []string { + return i.Verbs +} + +func (i *DefaultNonResourceRuleInfo) GetNonResourceURLs() []string { + return i.NonResourceURLs +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS new file mode 100644 index 0000000000..a5ccdcbd1b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- deads2k +- dims diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go new file mode 100644 index 0000000000..6fe3fa96ed --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorizerfactory + +import ( + "context" + "errors" + + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// alwaysAllowAuthorizer is an implementation of authorizer.Attributes +// which always says yes to an authorization request. +// It is useful in tests and when using kubernetes in an open manner. +type alwaysAllowAuthorizer struct{} + +func (alwaysAllowAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { + return authorizer.DecisionAllow, "", nil +} + +func (alwaysAllowAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { + return []authorizer.ResourceRuleInfo{ + &authorizer.DefaultResourceRuleInfo{ + Verbs: []string{"*"}, + APIGroups: []string{"*"}, + Resources: []string{"*"}, + }, + }, []authorizer.NonResourceRuleInfo{ + &authorizer.DefaultNonResourceRuleInfo{ + Verbs: []string{"*"}, + NonResourceURLs: []string{"*"}, + }, + }, false, nil +} + +func NewAlwaysAllowAuthorizer() *alwaysAllowAuthorizer { + return new(alwaysAllowAuthorizer) +} + +// alwaysDenyAuthorizer is an implementation of authorizer.Attributes +// which always says no to an authorization request. +// It is useful in unit tests to force an operation to be forbidden. 
+type alwaysDenyAuthorizer struct{} + +func (alwaysDenyAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { + return authorizer.DecisionNoOpinion, "Everything is forbidden.", nil +} + +func (alwaysDenyAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { + return []authorizer.ResourceRuleInfo{}, []authorizer.NonResourceRuleInfo{}, false, nil +} + +func NewAlwaysDenyAuthorizer() *alwaysDenyAuthorizer { + return new(alwaysDenyAuthorizer) +} + +type privilegedGroupAuthorizer struct { + groups []string +} + +func (r *privilegedGroupAuthorizer) Authorize(ctx context.Context, attr authorizer.Attributes) (authorizer.Decision, string, error) { + if attr.GetUser() == nil { + return authorizer.DecisionNoOpinion, "Error", errors.New("no user on request.") + } + for _, attr_group := range attr.GetUser().GetGroups() { + for _, priv_group := range r.groups { + if priv_group == attr_group { + return authorizer.DecisionAllow, "", nil + } + } + } + return authorizer.DecisionNoOpinion, "", nil +} + +// NewPrivilegedGroups is for use in loopback scenarios +func NewPrivilegedGroups(groups ...string) *privilegedGroupAuthorizer { + return &privilegedGroupAuthorizer{ + groups: groups, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go new file mode 100644 index 0000000000..665483308f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorizerfactory + +import ( + "errors" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/plugin/pkg/authorizer/webhook" + authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" +) + +// DelegatingAuthorizerConfig is the minimal configuration needed to create an authenticator +// built to delegate authorization to a kube API server +type DelegatingAuthorizerConfig struct { + SubjectAccessReviewClient authorizationclient.SubjectAccessReviewInterface + + // AllowCacheTTL is the length of time that a successful authorization response will be cached + AllowCacheTTL time.Duration + + // DenyCacheTTL is the length of time that an unsuccessful authorization response will be cached. + // You generally want more responsive, "deny, try again" flows. + DenyCacheTTL time.Duration + + // WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. 
+ WebhookRetryBackoff *wait.Backoff +} + +func (c DelegatingAuthorizerConfig) New() (authorizer.Authorizer, error) { + if c.WebhookRetryBackoff == nil { + return nil, errors.New("retry backoff parameters for delegating authorization webhook has not been specified") + } + + return webhook.NewFromInterface( + c.SubjectAccessReviewClient, + c.AllowCacheTTL, + c.DenyCacheTTL, + *c.WebhookRetryBackoff, + ) +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go b/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go new file mode 100644 index 0000000000..654aaeb742 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package path contains an authorizer that allows certain paths and path prefixes. +package path // import "k8s.io/apiserver/pkg/authorization/path" diff --git a/vendor/k8s.io/apiserver/pkg/authorization/path/path.go b/vendor/k8s.io/apiserver/pkg/authorization/path/path.go new file mode 100644 index 0000000000..03f524b38c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/path/path.go @@ -0,0 +1,67 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// NewAuthorizer returns an authorizer which accepts a given set of paths. +// Each path is either a fully matching path or it ends in * in case a prefix match is done. A leading / is optional. 
+func NewAuthorizer(alwaysAllowPaths []string) (authorizer.Authorizer, error) { + var prefixes []string + paths := sets.NewString() + for _, p := range alwaysAllowPaths { + p = strings.TrimPrefix(p, "/") + if len(p) == 0 { + // matches "/" + paths.Insert(p) + continue + } + if strings.ContainsRune(p[:len(p)-1], '*') { + return nil, fmt.Errorf("only trailing * allowed in %q", p) + } + if strings.HasSuffix(p, "*") { + prefixes = append(prefixes, p[:len(p)-1]) + } else { + paths.Insert(p) + } + } + + return authorizer.AuthorizerFunc(func(a authorizer.Attributes) (authorizer.Decision, string, error) { + if a.IsResourceRequest() { + return authorizer.DecisionNoOpinion, "", nil + } + + pth := strings.TrimPrefix(a.GetPath(), "/") + if paths.Has(pth) { + return authorizer.DecisionAllow, "", nil + } + + for _, prefix := range prefixes { + if strings.HasPrefix(pth, prefix) { + return authorizer.DecisionAllow, "", nil + } + } + + return authorizer.DecisionNoOpinion, "", nil + }), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/union/union.go b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go new file mode 100644 index 0000000000..460d9a4ab5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go @@ -0,0 +1,106 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package union implements an authorizer that combines multiple subauthorizer. +// The union authorizer iterates over each subauthorizer and returns the first +// decision that is either an Allow decision or a Deny decision. If a +// subauthorizer returns a NoOpinion, then the union authorizer moves onto the +// next authorizer or, if the subauthorizer was the last authorizer, returns +// NoOpinion as the aggregate decision. I.e. union authorizer creates an +// aggregate decision and supports short-circuit allows and denies from +// subauthorizers. 
+package union + +import ( + "context" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// unionAuthzHandler authorizer against a chain of authorizer.Authorizer +type unionAuthzHandler []authorizer.Authorizer + +// New returns an authorizer that authorizes against a chain of authorizer.Authorizer objects +func New(authorizationHandlers ...authorizer.Authorizer) authorizer.Authorizer { + return unionAuthzHandler(authorizationHandlers) +} + +// Authorizes against a chain of authorizer.Authorizer objects and returns nil if successful and returns error if unsuccessful +func (authzHandler unionAuthzHandler) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { + var ( + errlist []error + reasonlist []string + ) + + for _, currAuthzHandler := range authzHandler { + decision, reason, err := currAuthzHandler.Authorize(ctx, a) + + if err != nil { + errlist = append(errlist, err) + } + if len(reason) != 0 { + reasonlist = append(reasonlist, reason) + } + switch decision { + case authorizer.DecisionAllow, authorizer.DecisionDeny: + return decision, reason, err + case authorizer.DecisionNoOpinion: + // continue to the next authorizer + } + } + + return authorizer.DecisionNoOpinion, strings.Join(reasonlist, "\n"), utilerrors.NewAggregate(errlist) +} + +// unionAuthzRulesHandler authorizer against a chain of authorizer.RuleResolver +type unionAuthzRulesHandler []authorizer.RuleResolver + +// NewRuleResolvers returns an authorizer that authorizes against a chain of authorizer.Authorizer objects +func NewRuleResolvers(authorizationHandlers ...authorizer.RuleResolver) authorizer.RuleResolver { + return unionAuthzRulesHandler(authorizationHandlers) +} + +// RulesFor against a chain of authorizer.RuleResolver objects and returns nil if successful and returns error if unsuccessful +func (authzHandler unionAuthzRulesHandler) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { + var ( + errList []error + resourceRulesList []authorizer.ResourceRuleInfo + nonResourceRulesList []authorizer.NonResourceRuleInfo + ) + incompleteStatus := false + + for _, currAuthzHandler := range authzHandler { + resourceRules, nonResourceRules, incomplete, err := currAuthzHandler.RulesFor(user, namespace) + + if incomplete { + incompleteStatus = true + } + if err != nil { + errList = append(errList, err) + } + if len(resourceRules) > 0 { + resourceRulesList = append(resourceRulesList, resourceRules...) + } + if len(nonResourceRules) > 0 { + nonResourceRulesList = append(nonResourceRulesList, nonResourceRules...) + } + } + + return resourceRulesList, nonResourceRulesList, incompleteStatus, utilerrors.NewAggregate(errList) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go b/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go new file mode 100644 index 0000000000..3d0123b23f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deprecation + +import ( + "fmt" + "regexp" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" +) + +type apiLifecycleDeprecated interface { + APILifecycleDeprecated() (major, minor int) +} + +type apiLifecycleRemoved interface { + APILifecycleRemoved() (major, minor int) +} + +type apiLifecycleReplacement interface { + APILifecycleReplacement() schema.GroupVersionKind +} + +// extract all digits at the beginning of the string +var leadingDigits = regexp.MustCompile(`^(\d+)`) + +// MajorMinor parses a numeric major/minor version from the provided version info. +// The minor version drops all characters after the first non-digit character: +// version.Info{Major:"1", Minor:"2+"} -> 1,2 +// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2 +func MajorMinor(v version.Info) (int, int, error) { + major, err := strconv.Atoi(v.Major) + if err != nil { + return 0, 0, err + } + minor, err := strconv.Atoi(leadingDigits.FindString(v.Minor)) + if err != nil { + return 0, 0, err + } + return major, minor, nil +} + +// IsDeprecated returns true if obj implements APILifecycleDeprecated() and returns +// a major/minor version that is non-zero and is <= the specified current major/minor version. +func IsDeprecated(obj runtime.Object, currentMajor, currentMinor int) bool { + deprecated, isDeprecated := obj.(apiLifecycleDeprecated) + if !isDeprecated { + return false + } + + deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated() + // no deprecation version expressed + if deprecatedMajor == 0 && deprecatedMinor == 0 { + return false + } + // no current version info available + if currentMajor == 0 && currentMinor == 0 { + return true + } + // compare deprecation version to current version + if deprecatedMajor > currentMajor { + return false + } + if deprecatedMajor == currentMajor && deprecatedMinor > currentMinor { + return false + } + return true +} + +// RemovedRelease returns the major/minor version in which the given object is unavailable (in the form ".") +// if the object implements APILifecycleRemoved() to indicate a non-zero removal version, and returns an empty string otherwise. +func RemovedRelease(obj runtime.Object) string { + if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo { + removedMajor, removedMinor := removed.APILifecycleRemoved() + if removedMajor != 0 || removedMinor != 0 { + return fmt.Sprintf("%d.%d", removedMajor, removedMinor) + } + } + return "" +} + +// WarningMessage returns a human-readable deprecation warning if the object implements APILifecycleDeprecated() +// to indicate a non-zero deprecated major/minor version and has a populated GetObjectKind().GroupVersionKind(). 
+func WarningMessage(obj runtime.Object) string { + deprecated, isDeprecated := obj.(apiLifecycleDeprecated) + if !isDeprecated { + return "" + } + + deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated() + if deprecatedMajor == 0 && deprecatedMinor == 0 { + return "" + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Empty() { + return "" + } + deprecationWarning := fmt.Sprintf("%s %s is deprecated in v%d.%d+", gvk.GroupVersion().String(), gvk.Kind, deprecatedMajor, deprecatedMinor) + + if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo { + removedMajor, removedMinor := removed.APILifecycleRemoved() + if removedMajor != 0 || removedMinor != 0 { + deprecationWarning = deprecationWarning + fmt.Sprintf(", unavailable in v%d.%d+", removedMajor, removedMinor) + } + } + + if replaced, hasReplacement := obj.(apiLifecycleReplacement); hasReplacement { + replacement := replaced.APILifecycleReplacement() + if !replacement.Empty() { + deprecationWarning = deprecationWarning + fmt.Sprintf("; use %s %s", replacement.GroupVersion().String(), replacement.Kind) + } + } + + return deprecationWarning +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go new file mode 100644 index 0000000000..d175d15fe0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Addresses interface { + ServerAddressByClientCIDRs(net.IP) []metav1.ServerAddressByClientCIDR +} + +// DefaultAddresses is a default implementation of Addresses that will work in most cases +type DefaultAddresses struct { + // CIDRRules is a list of CIDRs and Addresses to use if a client is in the range + CIDRRules []CIDRRule + + // DefaultAddress is the address (hostname or IP and port) that should be used in + // if no CIDR matches more specifically. + DefaultAddress string +} + +// CIDRRule is a rule for adding an alternate path to the master based on matching CIDR +type CIDRRule struct { + IPRange net.IPNet + + // Address is the address (hostname or IP and port) that should be used in + // if this CIDR matches + Address string +} + +func (d DefaultAddresses) ServerAddressByClientCIDRs(clientIP net.IP) []metav1.ServerAddressByClientCIDR { + addressCIDRMap := []metav1.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: d.DefaultAddress, + }, + } + + for _, rule := range d.CIDRRules { + addressCIDRMap = append(addressCIDRMap, rule.ServerAddressByClientCIDRs(clientIP)...) 
+ } + return addressCIDRMap +} + +func (d CIDRRule) ServerAddressByClientCIDRs(clientIP net.IP) []metav1.ServerAddressByClientCIDR { + addressCIDRMap := []metav1.ServerAddressByClientCIDR{} + + if d.IPRange.Contains(clientIP) { + addressCIDRMap = append(addressCIDRMap, metav1.ServerAddressByClientCIDR{ + ClientCIDR: d.IPRange.String(), + ServerAddress: d.Address, + }) + } + return addressCIDRMap +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go new file mode 100644 index 0000000000..7e9927a3a5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + + "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// APIGroupHandler creates a webservice serving the supported versions, preferred version, and name +// of a group. E.g., such a web service will be registered at /apis/extensions. +type APIGroupHandler struct { + serializer runtime.NegotiatedSerializer + group metav1.APIGroup +} + +func NewAPIGroupHandler(serializer runtime.NegotiatedSerializer, group metav1.APIGroup) *APIGroupHandler { + if keepUnversioned(group.Name) { + // Because in release 1.1, /apis/extensions returns response with empty + // APIVersion, we use stripVersionNegotiatedSerializer to keep the + // response backwards compatible. + serializer = stripVersionNegotiatedSerializer{serializer} + } + + return &APIGroupHandler{ + serializer: serializer, + group: group, + } +} + +func (s *APIGroupHandler) WebService() *restful.WebService { + mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer) + ws := new(restful.WebService) + ws.Path(APIGroupPrefix + "/" + s.group.Name) + ws.Doc("get information of a group") + ws.Route(ws.GET("/").To(s.handle). + Doc("get information of a group"). + Operation("getAPIGroup"). + Produces(mediaTypes...). + Consumes(mediaTypes...). + Writes(metav1.APIGroup{})) + return ws +} + +// handle returns a handler which will return the api.GroupAndVersion of the group. 
+func (s *APIGroupHandler) handle(req *restful.Request, resp *restful.Response) { + s.ServeHTTP(resp.ResponseWriter, req.Request) +} + +func (s *APIGroupHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK, &s.group) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go new file mode 100644 index 0000000000..b33ecec653 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + + "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// legacyRootAPIHandler creates a webservice serving api group discovery. +type legacyRootAPIHandler struct { + // addresses is used to build cluster IPs for discovery. + addresses Addresses + apiPrefix string + serializer runtime.NegotiatedSerializer +} + +func NewLegacyRootAPIHandler(addresses Addresses, serializer runtime.NegotiatedSerializer, apiPrefix string) *legacyRootAPIHandler { + // Because in release 1.1, /apis returns response with empty APIVersion, we + // use stripVersionNegotiatedSerializer to keep the response backwards + // compatible. + serializer = stripVersionNegotiatedSerializer{serializer} + + return &legacyRootAPIHandler{ + addresses: addresses, + apiPrefix: apiPrefix, + serializer: serializer, + } +} + +// AddApiWebService adds a service to return the supported api versions at the legacy /api. +func (s *legacyRootAPIHandler) WebService() *restful.WebService { + mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer) + ws := new(restful.WebService) + ws.Path(s.apiPrefix) + ws.Doc("get available API versions") + ws.Route(ws.GET("/").To(s.handle). + Doc("get available API versions"). + Operation("getAPIVersions"). + Produces(mediaTypes...). + Consumes(mediaTypes...). 
+ Writes(metav1.APIVersions{})) + return ws +} + +func (s *legacyRootAPIHandler) handle(req *restful.Request, resp *restful.Response) { + clientIP := utilnet.GetClientIP(req.Request) + apiVersions := &metav1.APIVersions{ + ServerAddressByClientCIDRs: s.addresses.ServerAddressByClientCIDRs(clientIP), + Versions: []string{"v1"}, + } + + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp.ResponseWriter, req.Request, http.StatusOK, apiVersions) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go new file mode 100644 index 0000000000..beba9c8a41 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go @@ -0,0 +1,135 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + "sync" + + restful "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// GroupManager is an interface that allows dynamic mutation of the existing webservice to handle +// API groups being added or removed. +type GroupManager interface { + AddGroup(apiGroup metav1.APIGroup) + RemoveGroup(groupName string) + + WebService() *restful.WebService +} + +// rootAPIsHandler creates a webservice serving api group discovery. +// The list of APIGroups may change while the server is running because additional resources +// are registered or removed. It is not safe to cache the values. +type rootAPIsHandler struct { + // addresses is used to build cluster IPs for discovery. + addresses Addresses + + serializer runtime.NegotiatedSerializer + + // Map storing information about all groups to be exposed in discovery response. + // The map is from name to the group. + lock sync.RWMutex + apiGroups map[string]metav1.APIGroup + // apiGroupNames preserves insertion order + apiGroupNames []string +} + +func NewRootAPIsHandler(addresses Addresses, serializer runtime.NegotiatedSerializer) *rootAPIsHandler { + // Because in release 1.1, /apis returns response with empty APIVersion, we + // use stripVersionNegotiatedSerializer to keep the response backwards + // compatible. 
+	serializer = stripVersionNegotiatedSerializer{serializer}
+
+	return &rootAPIsHandler{
+		addresses:  addresses,
+		serializer: serializer,
+		apiGroups:  map[string]metav1.APIGroup{},
+	}
+}
+
+func (s *rootAPIsHandler) AddGroup(apiGroup metav1.APIGroup) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	_, alreadyExists := s.apiGroups[apiGroup.Name]
+
+	s.apiGroups[apiGroup.Name] = apiGroup
+	if !alreadyExists {
+		s.apiGroupNames = append(s.apiGroupNames, apiGroup.Name)
+	}
+}
+
+func (s *rootAPIsHandler) RemoveGroup(groupName string) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	delete(s.apiGroups, groupName)
+	for i := range s.apiGroupNames {
+		if s.apiGroupNames[i] == groupName {
+			s.apiGroupNames = append(s.apiGroupNames[:i], s.apiGroupNames[i+1:]...)
+			break
+		}
+	}
+}
+
+func (s *rootAPIsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	orderedGroups := []metav1.APIGroup{}
+	for _, groupName := range s.apiGroupNames {
+		orderedGroups = append(orderedGroups, s.apiGroups[groupName])
+	}
+
+	clientIP := utilnet.GetClientIP(req)
+	serverCIDR := s.addresses.ServerAddressByClientCIDRs(clientIP)
+	groups := make([]metav1.APIGroup, len(orderedGroups))
+	for i := range orderedGroups {
+		groups[i] = orderedGroups[i]
+		groups[i].ServerAddressByClientCIDRs = serverCIDR
+	}
+
+	responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, &metav1.APIGroupList{Groups: groups})
+}
+
+func (s *rootAPIsHandler) restfulHandle(req *restful.Request, resp *restful.Response) {
+	s.ServeHTTP(resp.ResponseWriter, req.Request)
+}
+
+// WebService returns a webservice serving api group discovery.
+// Note: during the server runtime apiGroups might change.
+func (s *rootAPIsHandler) WebService() *restful.WebService {
+	mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer)
+	ws := new(restful.WebService)
+	ws.Path(APIGroupPrefix)
+	ws.Doc("get available API versions")
+	ws.Route(ws.GET("/").To(s.restfulHandle).
+		Doc("get available API versions").
+		Operation("getAPIVersions").
+		Produces(mediaTypes...).
+		Consumes(mediaTypes...).
+		Writes(metav1.APIGroupList{}))
+	return ws
+}
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go
new file mode 100644
index 0000000000..a1b00decba
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+	"crypto/sha256"
+	"encoding/base64"
+)
+
+// StorageVersionHash calculates the storage version hash for a
+// <group/version/kind> tuple.
+// WARNING: this function is subject to change. Clients shouldn't depend on
+// this function.
+func StorageVersionHash(group, version, kind string) string { + gvk := group + "/" + version + "/" + kind + if gvk == "" { + return "" + } + bytes := sha256.Sum256([]byte(gvk)) + // Assuming there are N kinds in the cluster, and the hash is X-byte long, + // the chance of colliding hash P(N,X) approximates to 1-e^(-(N^2)/2^(8X+1)). + // P(10,000, 8) ~= 2.7*10^(-12), which is low enough. + // See https://en.wikipedia.org/wiki/Birthday_problem#Approximations. + return base64.StdEncoding.EncodeToString( + bytes[:8]) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go new file mode 100644 index 0000000000..7487ffc18a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" +) + +const APIGroupPrefix = "/apis" + +func keepUnversioned(group string) bool { + return group == "" || group == "extensions" +} + +// stripVersionEncoder strips APIVersion field from the encoding output. It's +// used to keep the responses at the discovery endpoints backward compatible +// with release-1.1, when the responses have empty APIVersion. +type stripVersionEncoder struct { + encoder runtime.Encoder + serializer runtime.Serializer + identifier runtime.Identifier +} + +func newStripVersionEncoder(e runtime.Encoder, s runtime.Serializer) runtime.Encoder { + return stripVersionEncoder{ + encoder: e, + serializer: s, + identifier: identifier(e), + } +} + +func identifier(e runtime.Encoder) runtime.Identifier { + result := map[string]string{ + "name": "stripVersion", + } + if e != nil { + result["encoder"] = string(e.Identifier()) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for stripVersionEncoder: %v", err) + } + return runtime.Identifier(identifier) +} + +func (c stripVersionEncoder) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(c.Identifier(), c.doEncode, w) + } + return c.doEncode(obj, w) +} + +func (c stripVersionEncoder) doEncode(obj runtime.Object, w io.Writer) error { + buf := bytes.NewBuffer([]byte{}) + err := c.encoder.Encode(obj, buf) + if err != nil { + return err + } + roundTrippedObj, gvk, err := c.serializer.Decode(buf.Bytes(), nil, nil) + if err != nil { + return err + } + gvk.Group = "" + gvk.Version = "" + roundTrippedObj.GetObjectKind().SetGroupVersionKind(*gvk) + return c.serializer.Encode(roundTrippedObj, w) +} + +// Identifier implements runtime.Encoder interface. +func (c stripVersionEncoder) Identifier() runtime.Identifier { + return c.identifier +} + +// stripVersionNegotiatedSerializer will return stripVersionEncoder when +// EncoderForVersion is called. See comments for stripVersionEncoder. 
+type stripVersionNegotiatedSerializer struct { + runtime.NegotiatedSerializer +} + +func (n stripVersionNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + serializer, ok := encoder.(runtime.Serializer) + if !ok { + // The stripVersionEncoder needs both an encoder and decoder, but is called from a context that doesn't have access to the + // decoder. We do a best effort cast here (since this code path is only for backwards compatibility) to get access to the caller's + // decoder. + panic(fmt.Sprintf("Unable to extract serializer from %#v", encoder)) + } + versioned := n.NegotiatedSerializer.EncoderForVersion(encoder, gv) + return newStripVersionEncoder(versioned, serializer) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go new file mode 100644 index 0000000000..0976041bff --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + + restful "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +type APIResourceLister interface { + ListAPIResources() []metav1.APIResource +} + +type APIResourceListerFunc func() []metav1.APIResource + +func (f APIResourceListerFunc) ListAPIResources() []metav1.APIResource { + return f() +} + +// APIVersionHandler creates a webservice serving the supported resources for the version +// E.g., such a web service will be registered at /apis/extensions/v1beta1. +type APIVersionHandler struct { + serializer runtime.NegotiatedSerializer + + groupVersion schema.GroupVersion + apiResourceLister APIResourceLister +} + +func NewAPIVersionHandler(serializer runtime.NegotiatedSerializer, groupVersion schema.GroupVersion, apiResourceLister APIResourceLister) *APIVersionHandler { + if keepUnversioned(groupVersion.Group) { + // Because in release 1.1, /apis/extensions returns response with empty + // APIVersion, we use stripVersionNegotiatedSerializer to keep the + // response backwards compatible. + serializer = stripVersionNegotiatedSerializer{serializer} + } + + return &APIVersionHandler{ + serializer: serializer, + groupVersion: groupVersion, + apiResourceLister: apiResourceLister, + } +} + +func (s *APIVersionHandler) AddToWebService(ws *restful.WebService) { + mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer) + ws.Route(ws.GET("/").To(s.handle). + Doc("get available resources"). + Operation("getAPIResources"). + Produces(mediaTypes...). + Consumes(mediaTypes...). + Writes(metav1.APIResourceList{})) +} + +// handle returns a handler which will return the api.VersionAndVersion of the group. 
+func (s *APIVersionHandler) handle(req *restful.Request, resp *restful.Response) { + s.ServeHTTP(resp.ResponseWriter, req.Request) +} + +func (s *APIVersionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK, + &metav1.APIResourceList{GroupVersion: s.groupVersion.String(), APIResources: s.apiResourceLister.ListAPIResources()}) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/doc.go new file mode 100644 index 0000000000..ef99114b61 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package endpoints contains the generic code that provides a RESTful Kubernetes-style API service. +package endpoints // import "k8s.io/apiserver/pkg/endpoints" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go b/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go new file mode 100644 index 0000000000..04264230d8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go @@ -0,0 +1,96 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filterlatency + +import ( + "context" + "net/http" + "time" + + utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +type requestFilterRecordKeyType int + +// requestFilterRecordKey is the context key for a request filter record struct. +const requestFilterRecordKey requestFilterRecordKeyType = iota + +type requestFilterRecord struct { + name string + startedTimestamp time.Time +} + +// withRequestFilterRecord attaches the given request filter record to the parent context. +func withRequestFilterRecord(parent context.Context, fr *requestFilterRecord) context.Context { + return apirequest.WithValue(parent, requestFilterRecordKey, fr) +} + +// requestFilterRecordFrom returns the request filter record from the given context. +func requestFilterRecordFrom(ctx context.Context) *requestFilterRecord { + fr, _ := ctx.Value(requestFilterRecordKey).(*requestFilterRecord) + return fr +} + +// TrackStarted measures the timestamp the given handler has started execution +// by attaching a handler to the chain. 
+func TrackStarted(handler http.Handler, name string) http.Handler { + return trackStarted(handler, name, utilclock.RealClock{}) +} + +// TrackCompleted measures the timestamp the given handler has completed execution and then +// it updates the corresponding metric with the filter latency duration. +func TrackCompleted(handler http.Handler) http.Handler { + return trackCompleted(handler, utilclock.RealClock{}, func(fr *requestFilterRecord, completedAt time.Time) { + metrics.RecordFilterLatency(fr.name, completedAt.Sub(fr.startedTimestamp)) + }) +} + +func trackStarted(handler http.Handler, name string, clock utilclock.PassiveClock) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if fr := requestFilterRecordFrom(ctx); fr != nil { + fr.name = name + fr.startedTimestamp = clock.Now() + + handler.ServeHTTP(w, r) + return + } + + fr := &requestFilterRecord{ + name: name, + startedTimestamp: clock.Now(), + } + r = r.WithContext(withRequestFilterRecord(ctx, fr)) + handler.ServeHTTP(w, r) + }) +} + +func trackCompleted(handler http.Handler, clock utilclock.PassiveClock, action func(*requestFilterRecord, time.Time)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // The previous filter has just completed. + completedAt := clock.Now() + + defer handler.ServeHTTP(w, r) + + ctx := r.Context() + if fr := requestFilterRecordFrom(ctx); fr != nil { + action(fr, completedAt) + } + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS new file mode 100644 index 0000000000..05259ce353 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- deads2k +- sttts +- soltysh diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go new file mode 100644 index 0000000000..891d609354 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go @@ -0,0 +1,256 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "bufio" + "errors" + "fmt" + "net" + "net/http" + "sync" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithAudit decorates a http.Handler with audit logging information for all the +// requests coming to the server. Audit level is decided according to requests' +// attributes and audit policy. Logs are emitted to the audit sink to +// process events. If sink or audit policy is nil, no decoration takes place. 
+func WithAudit(handler http.Handler, sink audit.Sink, policy policy.Checker, longRunningCheck request.LongRunningRequestCheck) http.Handler { + if sink == nil || policy == nil { + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) + responsewriters.InternalError(w, req, errors.New("failed to create audit event")) + return + } + ctx := req.Context() + if ev == nil || ctx == nil { + handler.ServeHTTP(w, req) + return + } + + ev.Stage = auditinternal.StageRequestReceived + if processed := processAuditEvent(sink, ev, omitStages); !processed { + audit.ApiserverAuditDroppedCounter.Inc() + responsewriters.InternalError(w, req, errors.New("failed to store audit event")) + return + } + + // intercept the status code + var longRunningSink audit.Sink + if longRunningCheck != nil { + ri, _ := request.RequestInfoFrom(ctx) + if longRunningCheck(req, ri) { + longRunningSink = sink + } + } + respWriter := decorateResponseWriter(w, ev, longRunningSink, omitStages) + + // send audit event when we leave this func, either via a panic or cleanly. In the case of long + // running requests, this will be the second audit event. + defer func() { + if r := recover(); r != nil { + defer panic(r) + ev.Stage = auditinternal.StagePanic + ev.ResponseStatus = &metav1.Status{ + Code: http.StatusInternalServerError, + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonInternalError, + Message: fmt.Sprintf("APIServer panic'd: %v", r), + } + processAuditEvent(sink, ev, omitStages) + return + } + + // if no StageResponseStarted event was sent b/c neither a status code nor a body was sent, fake it here + // But Audit-Id http header will only be sent when http.ResponseWriter.WriteHeader is called. + fakedSuccessStatus := &metav1.Status{ + Code: http.StatusOK, + Status: metav1.StatusSuccess, + Message: "Connection closed early", + } + if ev.ResponseStatus == nil && longRunningSink != nil { + ev.ResponseStatus = fakedSuccessStatus + ev.Stage = auditinternal.StageResponseStarted + processAuditEvent(longRunningSink, ev, omitStages) + } + + ev.Stage = auditinternal.StageResponseComplete + if ev.ResponseStatus == nil { + ev.ResponseStatus = fakedSuccessStatus + } + processAuditEvent(sink, ev, omitStages) + }() + handler.ServeHTTP(respWriter, req) + }) +} + +// createAuditEventAndAttachToContext is responsible for creating the audit event +// and attaching it to the appropriate request context. It returns: +// - context with audit event attached to it +// - created audit event +// - error if anything bad happened +func createAuditEventAndAttachToContext(req *http.Request, policy policy.Checker) (*http.Request, *auditinternal.Event, []auditinternal.Stage, error) { + ctx := req.Context() + + attribs, err := GetAuthorizerAttributes(ctx) + if err != nil { + return req, nil, nil, fmt.Errorf("failed to GetAuthorizerAttributes: %v", err) + } + + level, omitStages := policy.LevelAndStages(attribs) + audit.ObservePolicyLevel(level) + if level == auditinternal.LevelNone { + // Don't audit. 
+ return req, nil, nil, nil + } + + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx) + if !ok { + requestReceivedTimestamp = time.Now() + } + ev, err := audit.NewEventFromRequest(req, requestReceivedTimestamp, level, attribs) + if err != nil { + return req, nil, nil, fmt.Errorf("failed to complete audit event from request: %v", err) + } + + req = req.WithContext(request.WithAuditEvent(ctx, ev)) + + return req, ev, omitStages, nil +} + +func processAuditEvent(sink audit.Sink, ev *auditinternal.Event, omitStages []auditinternal.Stage) bool { + for _, stage := range omitStages { + if ev.Stage == stage { + return true + } + } + + if ev.Stage == auditinternal.StageRequestReceived { + ev.StageTimestamp = metav1.NewMicroTime(ev.RequestReceivedTimestamp.Time) + } else { + ev.StageTimestamp = metav1.NewMicroTime(time.Now()) + } + audit.ObserveEvent() + return sink.ProcessEvents(ev) +} + +func decorateResponseWriter(responseWriter http.ResponseWriter, ev *auditinternal.Event, sink audit.Sink, omitStages []auditinternal.Stage) http.ResponseWriter { + delegate := &auditResponseWriter{ + ResponseWriter: responseWriter, + event: ev, + sink: sink, + omitStages: omitStages, + } + + // check if the ResponseWriter we're wrapping is the fancy one we need + // or if the basic is sufficient + _, cn := responseWriter.(http.CloseNotifier) + _, fl := responseWriter.(http.Flusher) + _, hj := responseWriter.(http.Hijacker) + if cn && fl && hj { + return &fancyResponseWriterDelegator{delegate} + } + return delegate +} + +var _ http.ResponseWriter = &auditResponseWriter{} + +// auditResponseWriter intercepts WriteHeader, sets it in the event. If the sink is set, it will +// create immediately an event (for long running requests). +type auditResponseWriter struct { + http.ResponseWriter + event *auditinternal.Event + once sync.Once + sink audit.Sink + omitStages []auditinternal.Stage +} + +func (a *auditResponseWriter) setHttpHeader() { + a.ResponseWriter.Header().Set(auditinternal.HeaderAuditID, string(a.event.AuditID)) +} + +func (a *auditResponseWriter) processCode(code int) { + a.once.Do(func() { + if a.event.ResponseStatus == nil { + a.event.ResponseStatus = &metav1.Status{} + } + a.event.ResponseStatus.Code = int32(code) + a.event.Stage = auditinternal.StageResponseStarted + + if a.sink != nil { + processAuditEvent(a.sink, a.event, a.omitStages) + } + }) +} + +func (a *auditResponseWriter) Write(bs []byte) (int, error) { + // the Go library calls WriteHeader internally if no code was written yet. But this will go unnoticed for us + a.processCode(http.StatusOK) + a.setHttpHeader() + return a.ResponseWriter.Write(bs) +} + +func (a *auditResponseWriter) WriteHeader(code int) { + a.processCode(code) + a.setHttpHeader() + a.ResponseWriter.WriteHeader(code) +} + +// fancyResponseWriterDelegator implements http.CloseNotifier, http.Flusher and +// http.Hijacker which are needed to make certain http operation (e.g. watch, rsh, etc) +// working. 
+type fancyResponseWriterDelegator struct { + *auditResponseWriter +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + // fake a response status before protocol switch happens + f.processCode(http.StatusSwitchingProtocols) + + // This will be ignored if WriteHeader() function has already been called. + // It's not guaranteed Audit-ID http header is sent for all requests. + // For example, when user run "kubectl exec", apiserver uses a proxy handler + // to deal with the request, users can only get http headers returned by kubelet node. + f.setHttpHeader() + + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +var _ http.CloseNotifier = &fancyResponseWriterDelegator{} +var _ http.Flusher = &fancyResponseWriterDelegator{} +var _ http.Hijacker = &fancyResponseWriterDelegator{} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go new file mode 100644 index 0000000000..22b276991c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" +) + +// WithAuditAnnotations decorates a http.Handler with a []{key, value} that is merged +// with the audit.Event.Annotations map. This allows layers that run before WithAudit +// (such as authentication) to assert annotations. +// If sink or audit policy is nil, no decoration takes place. +func WithAuditAnnotations(handler http.Handler, sink audit.Sink, policy policy.Checker) http.Handler { + // no need to wrap if auditing is disabled + if sink == nil || policy == nil { + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req = req.WithContext(audit.WithAuditAnnotations(req.Context())) + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go new file mode 100644 index 0000000000..e88e7ad28d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -0,0 +1,94 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/klog/v2" +) + +// WithAuthentication creates an http handler that tries to authenticate the given request as a user, and then +// stores any such user found onto the provided context for the request. If authentication fails or returns an error +// the failed handler is used. On success, "Authorization" header is removed from the request and handler +// is invoked to serve the request. +func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences) http.Handler { + if auth == nil { + klog.Warningf("Authentication is disabled") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + authenticationStart := time.Now() + + if len(apiAuds) > 0 { + req = req.WithContext(authenticator.WithAudiences(req.Context(), apiAuds)) + } + resp, ok, err := auth.AuthenticateRequest(req) + defer recordAuthMetrics(resp, ok, err, apiAuds, authenticationStart) + if err != nil || !ok { + if err != nil { + klog.Errorf("Unable to authenticate the request due to an error: %v", err) + } + failed.ServeHTTP(w, req) + return + } + + if !audiencesAreAcceptable(apiAuds, resp.Audiences) { + err = fmt.Errorf("unable to match the audience: %v , accepted: %v", resp.Audiences, apiAuds) + klog.Error(err) + failed.ServeHTTP(w, req) + return + } + + // authorization header is not required anymore in case of a successful authentication. + req.Header.Del("Authorization") + + req = req.WithContext(genericapirequest.WithUser(req.Context(), resp.User)) + handler.ServeHTTP(w, req) + }) +} + +func Unauthorized(s runtime.NegotiatedSerializer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + requestInfo, found := genericapirequest.RequestInfoFrom(ctx) + if !found { + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } + + gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + responsewriters.ErrorNegotiated(apierrors.NewUnauthorized("Unauthorized"), s, gv, w, req) + }) +} + +func audiencesAreAcceptable(apiAuds, responseAudiences authenticator.Audiences) bool { + if len(apiAuds) == 0 || len(responseAudiences) == 0 { + return true + } + + return len(apiAuds.Intersect(responseAudiences)) > 0 +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go new file mode 100644 index 0000000000..09d7db8cc9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go @@ -0,0 +1,86 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// WithFailedAuthenticationAudit decorates a failed http.Handler used in WithAuthentication handler. +// It is meant to log only failed authentication requests. +func WithFailedAuthenticationAudit(failedHandler http.Handler, sink audit.Sink, policy policy.Checker) http.Handler { + if sink == nil || policy == nil { + return failedHandler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) + responsewriters.InternalError(w, req, errors.New("failed to create audit event")) + return + } + if ev == nil { + failedHandler.ServeHTTP(w, req) + return + } + + ev.ResponseStatus = &metav1.Status{} + ev.ResponseStatus.Message = getAuthMethods(req) + ev.Stage = auditinternal.StageResponseStarted + + rw := decorateResponseWriter(w, ev, sink, omitStages) + failedHandler.ServeHTTP(rw, req) + }) +} + +func getAuthMethods(req *http.Request) string { + authMethods := []string{} + + if _, _, ok := req.BasicAuth(); ok { + authMethods = append(authMethods, "basic") + } + + auth := strings.TrimSpace(req.Header.Get("Authorization")) + parts := strings.Split(auth, " ") + if len(parts) > 1 && strings.ToLower(parts[0]) == "bearer" { + authMethods = append(authMethods, "bearer") + } + + token := strings.TrimSpace(req.URL.Query().Get("access_token")) + if len(token) > 0 { + authMethods = append(authMethods, "access_token") + } + + if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 { + authMethods = append(authMethods, "x509") + } + + if len(authMethods) > 0 { + return fmt.Sprintf("Authentication failed, attempted: %s", strings.Join(authMethods, ", ")) + } + return "Authentication failed, no credentials provided" +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go new file mode 100644 index 0000000000..8d115ff091 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "context" + "errors" + "net/http" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" +) + +const ( + // Annotation key names set in advanced audit + decisionAnnotationKey = "authorization.k8s.io/decision" + reasonAnnotationKey = "authorization.k8s.io/reason" + + // Annotation values set in advanced audit + decisionAllow = "allow" + decisionForbid = "forbid" + reasonError = "internal error" +) + +// WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise. +func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { + if a == nil { + klog.Warningf("Authorization is disabled") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + ae := request.AuditEventFrom(ctx) + + attributes, err := GetAuthorizerAttributes(ctx) + if err != nil { + responsewriters.InternalError(w, req, err) + return + } + authorized, reason, err := a.Authorize(ctx, attributes) + // an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here. + if authorized == authorizer.DecisionAllow { + audit.LogAnnotation(ae, decisionAnnotationKey, decisionAllow) + audit.LogAnnotation(ae, reasonAnnotationKey, reason) + handler.ServeHTTP(w, req) + return + } + if err != nil { + audit.LogAnnotation(ae, reasonAnnotationKey, reasonError) + responsewriters.InternalError(w, req, err) + return + } + + klog.V(4).Infof("Forbidden: %#v, Reason: %q", req.RequestURI, reason) + audit.LogAnnotation(ae, decisionAnnotationKey, decisionForbid) + audit.LogAnnotation(ae, reasonAnnotationKey, reason) + responsewriters.Forbidden(ctx, attributes, w, req, reason, s) + }) +} + +func GetAuthorizerAttributes(ctx context.Context) (authorizer.Attributes, error) { + attribs := authorizer.AttributesRecord{} + + user, ok := request.UserFrom(ctx) + if ok { + attribs.User = user + } + + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + return nil, errors.New("no RequestInfo found in the context") + } + + // Start with common attributes that apply to resource and non-resource requests + attribs.ResourceRequest = requestInfo.IsResourceRequest + attribs.Path = requestInfo.Path + attribs.Verb = requestInfo.Verb + + attribs.APIGroup = requestInfo.APIGroup + attribs.APIVersion = requestInfo.APIVersion + attribs.Resource = requestInfo.Resource + attribs.Subresource = requestInfo.Subresource + attribs.Namespace = requestInfo.Namespace + attribs.Name = requestInfo.Name + + return &attribs, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go new file mode 100644 index 0000000000..e19f9d055f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" +) + +// WithCacheControl sets the Cache-Control header to "no-cache, private" because all servers are protected by authn/authz. +// see https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#defining_optimal_cache-control_policy +func WithCacheControl(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // Set the cache-control header if it is not already set + if _, ok := w.Header()["Cache-Control"]; !ok { + w.Header().Set("Cache-Control", "no-cache, private") + } + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go new file mode 100644 index 0000000000..a13125a25a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package filters contains all the http handler chain filters which +// _are_ api related, i.e. which are prerequisite for the API services +// to work (in contrast to the filters in the server package which are +// not part of the API contract). +package filters // import "k8s.io/apiserver/pkg/endpoints/filters" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go new file mode 100644 index 0000000000..1246ae863a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -0,0 +1,239 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "k8s.io/klog/v2" + + authenticationv1 "k8s.io/api/authentication/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server/httplog" +) + +// WithImpersonation is a filter that will inspect and check requests that attempt to change the user.Info for their requests +func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + impersonationRequests, err := buildImpersonationRequests(req.Header) + if err != nil { + klog.V(4).Infof("%v", err) + responsewriters.InternalError(w, req, err) + return + } + if len(impersonationRequests) == 0 { + handler.ServeHTTP(w, req) + return + } + + ctx := req.Context() + requestor, exists := request.UserFrom(ctx) + if !exists { + responsewriters.InternalError(w, req, errors.New("no user found for request")) + return + } + + // if groups are not specified, then we need to look them up differently depending on the type of user + // if they are specified, then they are the authority (including the inclusion of system:authenticated/system:unauthenticated groups) + groupsSpecified := len(req.Header[authenticationv1.ImpersonateGroupHeader]) > 0 + + // make sure we're allowed to impersonate each thing we're requesting. While we're iterating through, start building username + // and group information + username := "" + groups := []string{} + userExtra := map[string][]string{} + for _, impersonationRequest := range impersonationRequests { + gvk := impersonationRequest.GetObjectKind().GroupVersionKind() + actingAsAttributes := &authorizer.AttributesRecord{ + User: requestor, + Verb: "impersonate", + APIGroup: gvk.Group, + APIVersion: gvk.Version, + Namespace: impersonationRequest.Namespace, + Name: impersonationRequest.Name, + ResourceRequest: true, + } + + switch gvk.GroupKind() { + case v1.SchemeGroupVersion.WithKind("ServiceAccount").GroupKind(): + actingAsAttributes.Resource = "serviceaccounts" + username = serviceaccount.MakeUsername(impersonationRequest.Namespace, impersonationRequest.Name) + if !groupsSpecified { + // if groups aren't specified for a service account, we know the groups because its a fixed mapping. 
Add them + groups = serviceaccount.MakeGroupNames(impersonationRequest.Namespace) + } + + case v1.SchemeGroupVersion.WithKind("User").GroupKind(): + actingAsAttributes.Resource = "users" + username = impersonationRequest.Name + + case v1.SchemeGroupVersion.WithKind("Group").GroupKind(): + actingAsAttributes.Resource = "groups" + groups = append(groups, impersonationRequest.Name) + + case authenticationv1.SchemeGroupVersion.WithKind("UserExtra").GroupKind(): + extraKey := impersonationRequest.FieldPath + extraValue := impersonationRequest.Name + actingAsAttributes.Resource = "userextras" + actingAsAttributes.Subresource = extraKey + userExtra[extraKey] = append(userExtra[extraKey], extraValue) + + default: + klog.V(4).Infof("unknown impersonation request type: %v", impersonationRequest) + responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s) + return + } + + decision, reason, err := a.Authorize(ctx, actingAsAttributes) + if err != nil || decision != authorizer.DecisionAllow { + klog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) + responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) + return + } + } + + if username != user.Anonymous { + // When impersonating a non-anonymous user, include the 'system:authenticated' group + // in the impersonated user info: + // - if no groups were specified + // - if a group has been specified other than 'system:authenticated' + // + // If 'system:unauthenticated' group has been specified we should not include + // the 'system:authenticated' group. + addAuthenticated := true + for _, group := range groups { + if group == user.AllAuthenticated || group == user.AllUnauthenticated { + addAuthenticated = false + break + } + } + + if addAuthenticated { + groups = append(groups, user.AllAuthenticated) + } + } else { + addUnauthenticated := true + for _, group := range groups { + if group == user.AllUnauthenticated { + addUnauthenticated = false + break + } + } + + if addUnauthenticated { + groups = append(groups, user.AllUnauthenticated) + } + } + + newUser := &user.DefaultInfo{ + Name: username, + Groups: groups, + Extra: userExtra, + } + req = req.WithContext(request.WithUser(ctx, newUser)) + + oldUser, _ := request.UserFrom(ctx) + httplog.LogOf(req, w).Addf("%v is acting as %v", oldUser, newUser) + + ae := request.AuditEventFrom(ctx) + audit.LogImpersonatedUser(ae, newUser) + + // clear all the impersonation headers from the request + req.Header.Del(authenticationv1.ImpersonateUserHeader) + req.Header.Del(authenticationv1.ImpersonateGroupHeader) + for headerName := range req.Header { + if strings.HasPrefix(headerName, authenticationv1.ImpersonateUserExtraHeaderPrefix) { + req.Header.Del(headerName) + } + } + + handler.ServeHTTP(w, req) + }) +} + +func unescapeExtraKey(encodedKey string) string { + key, err := url.PathUnescape(encodedKey) // Decode %-encoded bytes. + if err != nil { + return encodedKey // Always record extra strings, even if malformed/unencoded. + } + return key +} + +// buildImpersonationRequests returns a list of objectreferences that represent the different things we're requesting to impersonate. +// Also includes a map[string][]string representing user.Info.Extra +// Each request must be authorized against the current user before switching contexts. 
+func buildImpersonationRequests(headers http.Header) ([]v1.ObjectReference, error) { + impersonationRequests := []v1.ObjectReference{} + + requestedUser := headers.Get(authenticationv1.ImpersonateUserHeader) + hasUser := len(requestedUser) > 0 + if hasUser { + if namespace, name, err := serviceaccount.SplitUsername(requestedUser); err == nil { + impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "ServiceAccount", Namespace: namespace, Name: name}) + } else { + impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "User", Name: requestedUser}) + } + } + + hasGroups := false + for _, group := range headers[authenticationv1.ImpersonateGroupHeader] { + hasGroups = true + impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "Group", Name: group}) + } + + hasUserExtra := false + for headerName, values := range headers { + if !strings.HasPrefix(headerName, authenticationv1.ImpersonateUserExtraHeaderPrefix) { + continue + } + + hasUserExtra = true + extraKey := unescapeExtraKey(strings.ToLower(headerName[len(authenticationv1.ImpersonateUserExtraHeaderPrefix):])) + + // make a separate request for each extra value they're trying to set + for _, value := range values { + impersonationRequests = append(impersonationRequests, + v1.ObjectReference{ + Kind: "UserExtra", + // we only parse out a group above, but the parsing will fail if there isn't SOME version + // using the internal version will help us fail if anyone starts using it + APIVersion: authenticationv1.SchemeGroupVersion.String(), + Name: value, + // ObjectReference doesn't have a subresource field. FieldPath is close and available, so we'll use that + // TODO fight the good fight for ObjectReference to refer to resources and subresources + FieldPath: extraKey, + }) + } + } + + if (hasGroups || hasUserExtra) && !hasUser { + return nil, fmt.Errorf("requested %v without impersonating a user", impersonationRequests) + } + + return impersonationRequests, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go new file mode 100644 index 0000000000..421c0e0a2b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go @@ -0,0 +1,115 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
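The impersonation filter above recognizes three header families — Impersonate-User, Impersonate-Group, and Impersonate-Extra-<key> — and rejects groups or extras supplied without a user. A minimal standalone sketch of how a client might set those headers follows; the header names mirror the authenticationv1 constants referenced above, while the URL, user, groups, and extra key are placeholders.

package main

import (
	"fmt"
	"net/http"
)

// buildImpersonatedRequest decorates an outgoing request with the same header
// families that buildImpersonationRequests parses on the server side: one
// user, any number of groups, and one header value per extra value.
func buildImpersonatedRequest(url, asUser string, groups []string, extras map[string][]string) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Impersonate-User", asUser) // authenticationv1.ImpersonateUserHeader
	for _, g := range groups {
		req.Header.Add("Impersonate-Group", g) // authenticationv1.ImpersonateGroupHeader
	}
	for key, values := range extras {
		for _, v := range values {
			// Keys land in user.Info.Extra after lowercasing and %-unescaping on the server.
			req.Header.Add("Impersonate-Extra-"+key, v) // authenticationv1.ImpersonateUserExtraHeaderPrefix
		}
	}
	return req, nil
}

func main() {
	req, err := buildImpersonatedRequest(
		"https://example.invalid/api/v1/namespaces", // placeholder URL
		"jane.doe",
		[]string{"developers"},
		map[string][]string{"scopes": {"view"}},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header)
}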
+*/ + +package filters + +import ( + "strings" + "time" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +const ( + successLabel = "success" + failureLabel = "failure" + errorLabel = "error" +) + +var ( + authenticatedUserCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "authenticated_user_requests", + Help: "Counter of authenticated requests broken out by username.", + StabilityLevel: metrics.ALPHA, + }, + []string{"username"}, + ) + + authenticatedAttemptsCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "authentication_attempts", + Help: "Counter of authenticated attempts.", + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}, + ) + + authenticationLatency = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Name: "authentication_duration_seconds", + Help: "Authentication duration in seconds broken out by result.", + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}, + ) +) + +func init() { + legacyregistry.MustRegister(authenticatedUserCounter) + legacyregistry.MustRegister(authenticatedAttemptsCounter) + legacyregistry.MustRegister(authenticationLatency) +} + +func recordAuthMetrics(resp *authenticator.Response, ok bool, err error, apiAudiences authenticator.Audiences, authStart time.Time) { + var resultLabel string + + switch { + case err != nil || (resp != nil && !audiencesAreAcceptable(apiAudiences, resp.Audiences)): + resultLabel = errorLabel + case !ok: + resultLabel = failureLabel + default: + resultLabel = successLabel + authenticatedUserCounter.WithLabelValues(compressUsername(resp.User.GetName())).Inc() + } + + authenticatedAttemptsCounter.WithLabelValues(resultLabel).Inc() + authenticationLatency.WithLabelValues(resultLabel).Observe(time.Since(authStart).Seconds()) +} + +// compressUsername maps all possible usernames onto a small set of categories +// of usernames. This is done both to limit the cardinality of the +// authorized_user_requests metric, and to avoid pushing actual usernames in the +// metric. +func compressUsername(username string) string { + switch { + // Known internal identities. + case username == "admin" || + username == "client" || + username == "kube_proxy" || + username == "kubelet" || + username == "system:serviceaccount:kube-system:default": + return username + // Probably an email address. + case strings.Contains(username, "@"): + return "email_id" + // Anything else (custom service accounts, custom external identities, etc.) + default: + return "other" + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go new file mode 100644 index 0000000000..d9e7369f67 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Kubernetes Authors. 
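compressUsername above keeps the authenticated_user_requests label cardinality bounded by mapping every username into a small set of buckets. A standalone sketch that mirrors the same bucketing on a few made-up inputs:

package main

import (
	"fmt"
	"strings"
)

// compressUsername mirrors the bucketing applied by the metrics filter above:
// a short allow-list of well-known identities, an "email_id" bucket for
// anything containing '@', and "other" for everything else.
func compressUsername(username string) string {
	switch {
	case username == "admin" ||
		username == "client" ||
		username == "kube_proxy" ||
		username == "kubelet" ||
		username == "system:serviceaccount:kube-system:default":
		return username
	case strings.Contains(username, "@"):
		return "email_id"
	default:
		return "other"
	}
}

func main() {
	for _, u := range []string{"kubelet", "alice@example.com", "system:serviceaccount:dev:builder"} {
		fmt.Printf("%-42s -> %s\n", u, compressUsername(u))
	}
}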
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + + utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithRequestReceivedTimestamp attaches the ReceivedTimestamp (the time the request reached +// the apiserver) to the context. +func WithRequestReceivedTimestamp(handler http.Handler) http.Handler { + return withRequestReceivedTimestampWithClock(handler, utilclock.RealClock{}) +} + +// The clock is passed as a parameter, handy for unit testing. +func withRequestReceivedTimestampWithClock(handler http.Handler, clock utilclock.PassiveClock) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + req = req.WithContext(request.WithReceivedTimestamp(ctx, clock.Now())) + + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go new file mode 100644 index 0000000000..9cc524d4e2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithRequestInfo attaches a RequestInfo to the context. +func WithRequestInfo(handler http.Handler, resolver request.RequestInfoResolver) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + info, err := resolver.NewRequestInfo(req) + if err != nil { + responsewriters.InternalError(w, req, fmt.Errorf("failed to create RequestInfo: %v", err)) + return + } + + req = req.WithContext(request.WithRequestInfo(ctx, info)) + + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go new file mode 100644 index 0000000000..414fc194ef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go @@ -0,0 +1,121 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
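WithRequestReceivedTimestamp and WithRequestInfo above both follow the same decorator shape: compute a value, attach it to the request context, and delegate to the wrapped handler. A stdlib-only sketch of that shape, assuming an illustrative context key rather than the real k8s.io/apiserver/pkg/endpoints/request helpers:

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

type ctxKey string

const receivedTimeKey ctxKey = "receivedTimestamp" // illustrative key; the real filters use typed accessors

// withReceivedTimestamp mimics the shape of the filters above: stamp a value
// into the request context, then delegate to the wrapped handler.
func withReceivedTimestamp(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx := context.WithValue(req.Context(), receivedTimeKey, time.Now())
		next.ServeHTTP(w, req.WithContext(ctx))
	})
}

func main() {
	// The innermost handler reads back what the outer filter attached.
	inner := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ts, _ := req.Context().Value(receivedTimeKey).(time.Time)
		fmt.Fprintf(w, "received at %s\n", ts.Format(time.RFC3339Nano))
	})
	chain := withReceivedTimestamp(inner)

	rec := httptest.NewRecorder()
	chain.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/healthz", nil))
	fmt.Print(rec.Body.String())
}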
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/storageversion" + _ "k8s.io/component-base/metrics/prometheus/workqueue" // for workqueue metric registration + "k8s.io/klog/v2" +) + +// WithStorageVersionPrecondition checks if the storage version barrier has +// completed, if not, it only passes the following API requests: +// 1. non-resource requests, +// 2. read requests, +// 3. write requests to the storageversion API, +// 4. create requests to the namespace API sent by apiserver itself, +// 5. write requests to the lease API in kube-system namespace, +// 6. resources whose StorageVersion is not pending update, including non-persisted resources. +func WithStorageVersionPrecondition(handler http.Handler, svm storageversion.Manager, s runtime.NegotiatedSerializer) http.Handler { + if svm == nil { + // TODO(roycaihw): switch to warning after the feature graduate to beta/GA + klog.V(2).Infof("Storage Version barrier is disabled") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if svm.Completed() { + handler.ServeHTTP(w, req) + return + } + ctx := req.Context() + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } + // Allow non-resource requests + if !requestInfo.IsResourceRequest { + handler.ServeHTTP(w, req) + return + } + // Allow read requests + if requestInfo.Verb == "get" || requestInfo.Verb == "list" || requestInfo.Verb == "watch" { + handler.ServeHTTP(w, req) + return + } + // Allow writes to the storage version API + if requestInfo.APIGroup == "internal.apiserver.k8s.io" && requestInfo.Resource == "storageversions" { + handler.ServeHTTP(w, req) + return + } + // The system namespace is required for apiserver-identity lease to exist. Allow the apiserver + // itself to create namespaces. + // NOTE: with this exception, if the bootstrap client writes namespaces with a new version, + // and the upgraded apiserver dies before updating the StorageVersion for namespaces, the + // storage migrator won't be able to tell these namespaces are stored in a different version in etcd. + // Because the bootstrap client only creates system namespace and doesn't update them, this can + // only happen if the upgraded apiserver is the first apiserver that kicks off namespace creation, + // or if an upgraded server that joins an existing cluster has new system namespaces (other + // than kube-system, kube-public, kube-node-lease) that need to be created. 
+ u, hasUser := request.UserFrom(ctx) + if requestInfo.APIGroup == "" && requestInfo.Resource == "namespaces" && + requestInfo.Verb == "create" && hasUser && + u.GetName() == user.APIServerUser && contains(u.GetGroups(), user.SystemPrivilegedGroup) { + handler.ServeHTTP(w, req) + return + } + // Allow writes to the lease API in kube-system. The storage version API depends on the + // apiserver-identity leases to operate. Leases in kube-system are either apiserver-identity + // lease (which gets garbage collected when stale) or leader-election leases (which gets + // periodically updated by system components). Both types of leases won't be stale in etcd. + if requestInfo.APIGroup == "coordination.k8s.io" && requestInfo.Resource == "leases" && + requestInfo.Namespace == metav1.NamespaceSystem { + handler.ServeHTTP(w, req) + return + } + // If the resource's StorageVersion is not in the to-be-updated list, let it pass. + // Non-persisted resources are not in the to-be-updated list, so they will pass. + gr := schema.GroupResource{requestInfo.APIGroup, requestInfo.Resource} + if !svm.PendingUpdate(gr) { + handler.ServeHTTP(w, req) + return + } + + gv := schema.GroupVersion{requestInfo.APIGroup, requestInfo.APIVersion} + responsewriters.ErrorNegotiated(apierrors.NewServiceUnavailable(fmt.Sprintf("wait for storage version registration to complete for resource: %v, last seen error: %v", gr, svm.LastUpdateError(gr))), s, gv, w, req) + }) +} + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go new file mode 100644 index 0000000000..55e85f0b73 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + "sync" + "unicode/utf8" + + "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/warning" +) + +// WithWarningRecorder attaches a deduplicating k8s.io/apiserver/pkg/warning#WarningRecorder to the request context. 
+func WithWarningRecorder(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + recorder := &recorder{writer: w} + req = req.WithContext(warning.WithWarningRecorder(req.Context(), recorder)) + handler.ServeHTTP(w, req) + }) +} + +var ( + truncateAtTotalRunes = 4 * 1024 + truncateItemRunes = 256 +) + +type recordedWarning struct { + agent string + text string +} + +type recorder struct { + // lock guards calls to AddWarning from multiple threads + lock sync.Mutex + + // recorded tracks whether AddWarning was already called with a given text + recorded map[string]bool + + // ordered tracks warnings added so they can be replayed and truncated if needed + ordered []recordedWarning + + // written tracks how many runes of text have been added as warning headers + written int + + // truncating tracks if we have already exceeded truncateAtTotalRunes and are now truncating warning messages as we add them + truncating bool + + // writer is the response writer to add warning headers to + writer http.ResponseWriter +} + +func (r *recorder) AddWarning(agent, text string) { + if len(text) == 0 { + return + } + + r.lock.Lock() + defer r.lock.Unlock() + + // if we've already exceeded our limit and are already truncating, return early + if r.written >= truncateAtTotalRunes && r.truncating { + return + } + + // init if needed + if r.recorded == nil { + r.recorded = map[string]bool{} + } + + // dedupe if already warned + if r.recorded[text] { + return + } + r.recorded[text] = true + r.ordered = append(r.ordered, recordedWarning{agent: agent, text: text}) + + // truncate on a rune boundary, if needed + textRuneLength := utf8.RuneCountInString(text) + if r.truncating && textRuneLength > truncateItemRunes { + text = string([]rune(text)[:truncateItemRunes]) + textRuneLength = truncateItemRunes + } + + // compute the header + header, err := net.NewWarningHeader(299, agent, text) + if err != nil { + return + } + + // if this fits within our limit, or we're already truncating, write and return + if r.written+textRuneLength <= truncateAtTotalRunes || r.truncating { + r.written += textRuneLength + r.writer.Header().Add("Warning", header) + return + } + + // otherwise, enable truncation, reset, and replay the existing items as truncated warnings + r.truncating = true + r.written = 0 + r.writer.Header().Del("Warning") + utilruntime.HandleError(fmt.Errorf("exceeded max warning header size, truncating")) + for _, w := range r.ordered { + agent := w.agent + text := w.text + + textRuneLength := utf8.RuneCountInString(text) + if textRuneLength > truncateItemRunes { + text = string([]rune(text)[:truncateItemRunes]) + textRuneLength = truncateItemRunes + } + if header, err := net.NewWarningHeader(299, agent, text); err == nil { + r.written += textRuneLength + r.writer.Header().Add("Warning", header) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go new file mode 100644 index 0000000000..22b9736614 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go @@ -0,0 +1,137 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
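The recorder above enforces two limits: roughly 4 KiB of warning text per response (truncateAtTotalRunes) and, once that budget is exceeded, 256 runes per individual warning (truncateItemRunes), always cutting on a rune boundary. A small sketch of the per-item truncation; the printed header layout follows the RFC 7234 warn-value form used for the Warning response header, and the agent shown is a placeholder:

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

const truncateItemRunes = 256

// truncateWarning shortens a warning message on a rune boundary, mirroring the
// per-item truncation the recorder applies once the total budget is exceeded.
func truncateWarning(text string) string {
	if utf8.RuneCountInString(text) <= truncateItemRunes {
		return text
	}
	return string([]rune(text)[:truncateItemRunes])
}

func main() {
	long := strings.Repeat("ü", 300) // multi-byte runes: the limit counts runes, not bytes
	fmt.Println(utf8.RuneCountInString(truncateWarning(long))) // 256

	// A recorded warning reaches the client as a Warning header, e.g.:
	// Warning: 299 - "deprecated field used"
	fmt.Printf("Warning: 299 - %q\n", truncateWarning("deprecated field used"))
}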
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "path" + "time" + + restful "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storageversion" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" +) + +// APIGroupVersion is a helper for exposing rest.Storage objects as http.Handlers via go-restful +// It handles URLs of the form: +// /${storage_key}[/${object_name}] +// Where 'storage_key' points to a rest.Storage object stored in storage. +// This object should contain all parameterization necessary for running a particular API version +type APIGroupVersion struct { + Storage map[string]rest.Storage + + Root string + + // GroupVersion is the external group version + GroupVersion schema.GroupVersion + + // OptionsExternalVersion controls the Kubernetes APIVersion used for common objects in the apiserver + // schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may + // define a version "v1beta1" but want to use the Kubernetes "v1" internal objects. If + // empty, defaults to GroupVersion. + OptionsExternalVersion *schema.GroupVersion + // MetaGroupVersion defaults to "meta.k8s.io/v1" and is the scheme group version used to decode + // common API implementations like ListOptions. Future changes will allow this to vary by group + // version (for when the inevitable meta/v2 group emerges). + MetaGroupVersion *schema.GroupVersion + + // RootScopedKinds are the root scoped kinds for the primary GroupVersion + RootScopedKinds sets.String + + // Serializer is used to determine how to convert responses from API methods into bytes to send over + // the wire. + Serializer runtime.NegotiatedSerializer + ParameterCodec runtime.ParameterCodec + + Typer runtime.ObjectTyper + Creater runtime.ObjectCreater + Convertor runtime.ObjectConvertor + Defaulter runtime.ObjectDefaulter + Linker runtime.SelfLinker + UnsafeConvertor runtime.ObjectConvertor + TypeConverter fieldmanager.TypeConverter + + EquivalentResourceRegistry runtime.EquivalentResourceRegistry + + // Authorizer determines whether a user is allowed to make a certain request. The Handler does a preliminary + // authorization check using the request URI but it may be necessary to make additional checks, such as in + // the create-on-update case + Authorizer authorizer.Authorizer + + Admit admission.Interface + + MinRequestTimeout time.Duration + + // OpenAPIModels exposes the OpenAPI models to each individual handler. + OpenAPIModels openapiproto.Models + + // The limit on the request body size that would be accepted and decoded in a write request. + // 0 means no limit. 
+ MaxRequestBodyBytes int64 +} + +// InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container. +// It is expected that the provided path root prefix will serve all operations. Root MUST NOT end +// in a slash. +func (g *APIGroupVersion) InstallREST(container *restful.Container) ([]*storageversion.ResourceInfo, error) { + prefix := path.Join(g.Root, g.GroupVersion.Group, g.GroupVersion.Version) + installer := &APIInstaller{ + group: g, + prefix: prefix, + minRequestTimeout: g.MinRequestTimeout, + } + + apiResources, resourceInfos, ws, registrationErrors := installer.Install() + versionDiscoveryHandler := discovery.NewAPIVersionHandler(g.Serializer, g.GroupVersion, staticLister{apiResources}) + versionDiscoveryHandler.AddToWebService(ws) + container.Add(ws) + return removeNonPersistedResources(resourceInfos), utilerrors.NewAggregate(registrationErrors) +} + +func removeNonPersistedResources(infos []*storageversion.ResourceInfo) []*storageversion.ResourceInfo { + var filtered []*storageversion.ResourceInfo + for _, info := range infos { + // if EncodingVersion is empty, then the apiserver does not + // need to register this resource via the storage version API, + // thus we can remove it. + if info != nil && len(info.EncodingVersion) > 0 { + filtered = append(filtered, info) + } + } + return filtered +} + +// staticLister implements the APIResourceLister interface +type staticLister struct { + list []metav1.APIResource +} + +func (s staticLister) ListAPIResources() []metav1.APIResource { + return s.list +} + +var _ discovery.APIResourceLister = &staticLister{} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go new file mode 100644 index 0000000000..631914f36a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -0,0 +1,247 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
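InstallREST above derives the serving prefix with path.Join(Root, Group, Version). Assuming the conventional roots (/apis for grouped APIs, /api for the legacy core group whose group name is empty), that yields the familiar discovery paths:

package main

import (
	"fmt"
	"path"
)

func main() {
	// prefix := path.Join(g.Root, g.GroupVersion.Group, g.GroupVersion.Version)
	fmt.Println(path.Join("/apis", "apps", "v1")) // /apis/apps/v1
	fmt.Println(path.Join("/api", "", "v1"))      // /api/v1 — path.Join drops the empty core group
}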
+*/ + +package handlers + +import ( + "bytes" + "context" + "fmt" + "net/http" + "strings" + "time" + "unicode" + "unicode/utf8" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +var namespaceGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} + +func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("Create", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + if includeName { + // name was required, return + scope.err(err, w, req) + return + } + + // otherwise attempt to look up the namespace + namespace, err = scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + } + + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + gv := scope.Kind.GroupVersion() + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + + decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) + + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.CreateOptions{} + values := req.URL.Query() + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if errs := validation.ValidateCreateOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "CreateOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) + + defaultGVK := scope.Kind + original := r.New() + trace.Step("About to convert to expected version") + obj, gvk, err := decoder.Decode(body, &defaultGVK, original) + if err != nil { + err = transformDecodeError(scope.Typer, err, original, gvk, body) + 
scope.err(err, w, req) + return + } + if gvk.GroupVersion() != gv { + err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%v)", gvk.GroupVersion().String(), gv.String())) + scope.err(err, w, req) + return + } + trace.Step("Conversion done") + + // On create, get name from new object if unset + if len(name) == 0 { + _, name, _ = scope.Namer.ObjectName(obj) + } + if len(namespace) == 0 && *gvk == namespaceGVK { + namespace = name + } + ctx = request.WithNamespace(ctx, namespace) + + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + + userInfo, _ := request.UserFrom(ctx) + + trace.Step("About to store object in database") + admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo) + requestFunc := func() (runtime.Object, error) { + return r.Create( + ctx, + name, + obj, + rest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope), + options, + ) + } + // Dedup owner references before updating managed fields + dedupOwnerReferencesAndAddWarning(obj, req.Context(), false) + result, err := finishRequest(timeout, func() (runtime.Object, error) { + if scope.FieldManager != nil { + liveObj, err := scope.Creater.New(scope.Kind) + if err != nil { + return nil, fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err) + } + obj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) + } + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) { + if err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil { + return nil, err + } + } + // Dedup owner references again after mutating admission happens + dedupOwnerReferencesAndAddWarning(obj, req.Context(), true) + result, err := requestFunc() + // If the object wasn't committed to storage because it's serialized size was too large, + // it is safe to remove managedFields (which can be large) and try again. + if isTooLargeError(err) { + if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil { + accessor.SetManagedFields(nil) + result, err = requestFunc() + } + } + return result, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + code := http.StatusCreated + status, ok := result.(*metav1.Status) + if ok && err == nil && status.Code == 0 { + status.Code = int32(code) + } + + transformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result) + } +} + +// CreateNamedResource returns a function that will handle a resource creation with name. +func CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc { + return createHandler(r, scope, admission, true) +} + +// CreateResource returns a function that will handle a resource creation. 
+func CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc { + return createHandler(&namedCreaterAdapter{r}, scope, admission, false) +} + +type namedCreaterAdapter struct { + rest.Creater +} + +func (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { + return c.Creater.Create(ctx, obj, createValidatingAdmission, options) +} + +// manager is assumed to be already a valid value, we need to make +// userAgent into a valid value too. +func managerOrUserAgent(manager, userAgent string) string { + if manager != "" { + return manager + } + return prefixFromUserAgent(userAgent) +} + +// prefixFromUserAgent takes the characters preceding the first /, quote +// unprintable character and then trim what's beyond the +// FieldManagerMaxLength limit. +func prefixFromUserAgent(u string) string { + m := strings.Split(u, "/")[0] + buf := bytes.NewBuffer(nil) + for _, r := range m { + // Ignore non-printable characters + if !unicode.IsPrint(r) { + continue + } + // Only append if we have room for it + if buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength { + break + } + buf.WriteRune(r) + } + return buf.String() +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go new file mode 100644 index 0000000000..892aaf4a0d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -0,0 +1,290 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +// DeleteResource returns a function that will handle a resource deletion +// TODO admission here becomes solely validating admission +func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. 
+ trace := utiltrace.New("Delete", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.DeleteOptions{} + if allowsOptions { + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + if len(body) > 0 { + s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) + if err != nil { + scope.err(err, w, req) + return + } + // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions + // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions + defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") + obj, _, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + if err != nil { + scope.err(err, w, req) + return + } + if obj != options { + scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) + return + } + trace.Step("Decoded delete options") + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + trace.Step("Recorded the audit event") + } else { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + } + if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) + + trace.Step("About to delete object from database") + wasDeleted := true + userInfo, _ := request.UserFrom(ctx) + staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) + result, err := finishRequest(timeout, func() (runtime.Object, error) { + obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) + wasDeleted = deleted + return obj, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object deleted from database") + + status := http.StatusOK + // Return http.StatusAccepted if the resource was not deleted immediately and + // user requested 
cascading deletion by setting OrphanDependents=false. + // Note: We want to do this always if resource was not deleted immediately, but + // that will break existing clients. + // Other cases where resource is not instantly deleted are: namespace deletion + // and pod graceful deletion. + if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents { + status = http.StatusAccepted + } + // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid + // object with the response. + if result == nil { + result = &metav1.Status{ + Status: metav1.StatusSuccess, + Code: int32(status), + Details: &metav1.StatusDetails{ + Name: name, + Kind: scope.Kind.Kind, + }, + } + } + + transformResponseObject(ctx, scope, trace, req, w, status, outputMediaType, result) + } +} + +// DeleteCollection returns a function that will handle a collection deletion +func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + trace := utiltrace.New("Delete", utiltrace.Field{"url", req.URL.Path}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, err := scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + ae := request.AuditEventFrom(ctx) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + listOptions := metainternalversion.ListOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + + if errs := metainternalversionvalidation.ValidateListOptions(&listOptions); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) + scope.err(err, w, req) + return + } + + // transform fields + // TODO: DecodeParametersInto should do this. 
+ if listOptions.FieldSelector != nil { + fn := func(label, value string) (newLabel, newValue string, err error) { + return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) + } + if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { + // TODO: allow bad request to set field causes based on query parameters + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + + options := &metav1.DeleteOptions{} + if checkBody { + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + if len(body) > 0 { + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions + // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions + defaultGVK := scope.Kind.GroupVersion().WithKind("DeleteOptions") + obj, _, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + if err != nil { + scope.err(err, w, req) + return + } + if obj != options { + scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) + return + } + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + } else { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + } + if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) + + admit = admission.WithAudit(admit, ae) + userInfo, _ := request.UserFrom(ctx) + staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) + result, err := finishRequest(timeout, func() (runtime.Object, error) { + return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options, &listOptions) + }) + if err != nil { + scope.err(err, w, req) + return + } + + // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid + // object with the response. + if result == nil { + result = &metav1.Status{ + Status: metav1.StatusSuccess, + Code: http.StatusOK, + Details: &metav1.StatusDetails{ + Kind: scope.Kind.Kind, + }, + } + } + + transformResponseObject(ctx, scope, trace, req, w, http.StatusOK, outputMediaType, result) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go new file mode 100644 index 0000000000..d39833814e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
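Both delete handlers above accept DeleteOptions either as query parameters or as a request body, and the body defaults to meta.k8s.io/v1 DeleteOptions. A sketch of what such a body might look like on the wire, decoded here with plain encoding/json so it stays self-contained; the field names follow the metav1.DeleteOptions JSON tags and the values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Wire form of an options body the delete handlers can decode.
	body := []byte(`{
	  "apiVersion": "meta.k8s.io/v1",
	  "kind": "DeleteOptions",
	  "gracePeriodSeconds": 30,
	  "propagationPolicy": "Foreground"
	}`)

	var opts map[string]interface{}
	if err := json.Unmarshal(body, &opts); err != nil {
		panic(err)
	}
	fmt.Println(opts["kind"], opts["propagationPolicy"], opts["gracePeriodSeconds"])
}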
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package handlers contains HTTP handlers to implement the apiserver APIs. +package handlers // import "k8s.io/apiserver/pkg/endpoints/handlers" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS new file mode 100644 index 0000000000..a7470137b8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS @@ -0,0 +1,5 @@ +approvers: +- jennybuckley +- apelisse +reviewers: +- kwiesmueller diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go new file mode 100644 index 0000000000..fc471e7975 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" +) + +type buildManagerInfoManager struct { + fieldManager Manager + groupVersion schema.GroupVersion +} + +var _ Manager = &buildManagerInfoManager{} + +// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier +// combining operation and version for update requests, and just operation for apply requests. +func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion) Manager { + return &buildManagerInfoManager{ + fieldManager: f, + groupVersion: gv, + } +} + +// Update implements Manager. +func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. 
+func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) +} + +func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) { + managerInfo := metav1.ManagedFieldsEntry{ + Manager: prefix, + Operation: operation, + APIVersion: f.groupVersion.String(), + } + if managerInfo.Manager == "" { + managerInfo.Manager = "unknown" + } + return internal.BuildManagerIdentifier(&managerInfo) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go new file mode 100644 index 0000000000..c3184e2415 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go @@ -0,0 +1,134 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + "sort" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type capManagersManager struct { + fieldManager Manager + maxUpdateManagers int + oldUpdatesManagerName string +} + +var _ Manager = &capManagersManager{} + +// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates +// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update. +func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager { + return &capManagersManager{ + fieldManager: fieldManager, + maxUpdateManagers: maxUpdateManagers, + oldUpdatesManagerName: "ancient-changes", + } +} + +// Update implements Manager. +func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return object, managed, err + } + if managed, err = f.capUpdateManagers(managed); err != nil { + return nil, nil, fmt.Errorf("failed to cap update managers: %v", err) + } + return object, managed, nil +} + +// Apply implements Manager. +func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} + +// capUpdateManagers merges a number of the oldest update entries into versioned buckets, +// such that the number of entries from updates does not exceed f.maxUpdateManagers. 
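capUpdateManagers below keeps at most maxUpdateManagers update entries by sorting managers oldest-first (a missing timestamp sorts as zero, and the manager name breaks ties) and folding the overflow into a shared versioned bucket. A standalone sketch of that oldest-first selection, with made-up manager names, times, and cap:

package main

import (
	"fmt"
	"sort"
)

const maxUpdateManagers = 3 // illustrative cap

func main() {
	// Update managers with their last-update times (Unix seconds); 0 means unknown.
	times := map[string]int64{
		"kubectl-edit": 100,
		"controller-a": 50,
		"controller-b": 10,
		"kubelet":      200,
		"old-script":   0,
	}
	updaters := make([]string, 0, len(times))
	for m := range times {
		updaters = append(updaters, m)
	}
	// Oldest first, name as tie-breaker, mirroring the sort in capUpdateManagers below.
	sort.Slice(updaters, func(i, j int) bool {
		if times[updaters[i]] != times[updaters[j]] {
			return times[updaters[i]] < times[updaters[j]]
		}
		return updaters[i] < updaters[j]
	})
	// Everything beyond the cap would be merged into the "ancient-changes" bucket.
	overflow := len(updaters) - maxUpdateManagers
	fmt.Println("merged into bucket:", updaters[:overflow])
	fmt.Println("kept:", updaters[overflow:])
}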
+func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) { + // Gather all entries from updates + updaters := []string{} + for manager, fields := range managed.Fields() { + if !fields.Applied() { + updaters = append(updaters, manager) + } + } + if len(updaters) <= f.maxUpdateManagers { + return managed, nil + } + + // If we have more than the maximum, sort the update entries by time, oldest first. + sort.Slice(updaters, func(i, j int) bool { + iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0) + if iTime != nil { + iSeconds = iTime.Unix() + } + if jTime != nil { + jSeconds = jTime.Unix() + } + if iSeconds != jSeconds { + return iSeconds < jSeconds + } + return updaters[i] < updaters[j] + }) + + // Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap + versionToFirstManager := map[string]string{} + for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ { + manager := updaters[i] + vs := managed.Fields()[manager] + time := managed.Times()[manager] + version := string(vs.APIVersion()) + + // Create a new manager identifier for the versioned bucket entry. + // The version for this manager comes from the version of the update being merged into the bucket. + bucket, err := internal.BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ + Manager: f.oldUpdatesManagerName, + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: version, + }) + if err != nil { + return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err) + } + + // Merge the fieldets if this is not the first time the version was seen. + // Otherwise just record the manager name in versionToFirstManager + if first, ok := versionToFirstManager[version]; ok { + // If the bucket doesn't exists yet, create one. + if _, ok := managed.Fields()[bucket]; !ok { + s := managed.Fields()[first] + delete(managed.Fields(), first) + managed.Fields()[bucket] = s + } + + managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied()) + delete(managed.Fields(), manager) + length-- + + // Use the time from the update being merged into the bucket, since it is more recent. 
+ managed.Times()[bucket] = time + } else { + versionToFirstManager[version] = manager + } + } + + return managed, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml new file mode 100644 index 0000000000..a667e98342 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml @@ -0,0 +1,7018 @@ +apiVersion: v1 +kind: Endpoints +metadata: + creationTimestamp: '2016-10-04T17:45:58Z' + labels: + app: my-app + name: app-server + namespace: default + resourceVersion: '184597135' + selfLink: /self/link + uid: 6826f086-8a5a-11e6-8d09-42010a800005 +subsets: +- addresses: + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0000 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0001 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0002 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0003 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0004 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0005 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0006 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0007 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0008 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0009 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0010 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0011 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0012 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0015 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0016 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 
10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0017 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0018 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0019 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0020 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0021 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0022 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0023 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0024 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0025 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0026 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0027 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0028 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0029 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0030 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0031 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0032 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0033 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0034 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0035 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0036 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0037 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0038 + 
namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0039 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0040 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0041 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0042 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0043 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0044 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0045 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0046 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0047 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0048 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0049 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0050 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0051 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0052 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0053 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0054 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0055 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0056 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0057 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0058 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0059 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0060 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0061 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0062 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0063 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0064 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0065 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0066 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0067 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0068 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0069 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0070 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0071 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0072 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0073 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0074 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0075 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0076 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0077 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0078 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0079 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0080 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0081 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0082 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0083 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0084 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0085 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0086 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0087 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0088 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0089 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0090 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0091 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0092 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0093 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0094 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0095 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0096 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0097 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0098 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0099 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0100 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0101 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0102 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0103 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0104 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0105 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0106 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0107 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0108 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0109 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0110 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0111 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0112 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0113 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0114 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0115 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0116 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0117 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0118 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0119 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0120 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0121 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0122 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0123 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0124 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0125 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0126 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0127 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0128 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0129 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0130 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0131 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0132 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0133 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0134 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0135 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0136 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0137 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0138 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0139 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0140 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0141 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0142 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0143 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0144 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0145 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0146 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0147 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0148 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0149 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0150 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0151 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0152 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0153 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0154 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0155 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0156 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0157 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0158 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0159 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0160 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0161 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0162 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0163 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0164 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0165 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0166 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0167 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0168 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0169 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0170 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0171 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0172 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0173 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0174 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0175 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0176 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0177 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0178 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0179 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0180 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0181 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0182 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0183 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0184 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0185 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0186 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0187 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0188 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0189 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0190 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0191 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0192 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0193 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0194 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0195 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0196 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0197 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0198 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0199 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0200 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0201 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0202 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0203 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0204 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0205 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0206 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0207 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0208 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0209 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0210 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0211 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0212 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0213 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0214 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0215 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0216 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0217 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0218 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0219 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0220 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0221 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0222 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0223 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0224 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0225 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0226 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0227 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0228 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0229 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0230 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0231 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0232 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0233 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0234 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0235 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0236 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0237 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0238 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0239 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0240 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0241 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0242 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0243 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0244 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0245 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0246 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0247 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0248 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0249 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0250 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0251 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0252 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0253 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0254 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0255 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0256 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0257 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0258 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0259 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0260 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0261 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0262 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0263 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0264 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0265 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0266 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0267 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0268 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0269 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0270 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0271 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0272 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0273 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0274 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0275 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0276 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0277 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0278 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0279 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0280 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0281 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0282 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0283 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0284 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0285 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0286 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0287 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0288 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0289 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0290 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0291 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0292 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0293 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0294 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0295 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0296 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0297 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0298 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0299 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0300 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0301 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0302 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0303 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0304 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0305 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0306 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0307 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0308 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0309 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0310 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0311 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0312 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0313 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0314 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0315 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0316 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0317 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0318 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0319 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0320 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0321 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0322 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0323 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0324 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0325 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0326 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0327 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0328 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0329 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0330 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0331 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0332 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0333 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0334 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0335 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0336 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
+      kind: Pod
+      name: pod-name-1234-0337
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
  [... addresses for pod-name-1234-0338 through pod-name-1234-0783 repeat the same placeholder pattern: ip 10.0.0.1, kind Pod, namespace default, resourceVersion '1234567890', uid 11111111-2222-3333-4444-555555555555 ...]
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0784
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
kind: Pod + name: pod-name-1234-0785 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0786 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0787 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0788 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0789 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0790 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0791 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0792 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0793 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0794 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0795 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0796 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0797 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0798 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0799 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0800 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0801 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0802 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0803 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0804 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0805 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0806 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0807 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0808 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0809 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0810 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0811 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0812 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0813 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0814 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0815 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0816 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0817 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0818 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0819 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0820 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0821 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0822 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0823 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0824 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0825 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0826 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0827 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0828 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0829 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0830 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0831 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0832 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0833 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0834 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0835 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0836 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0837 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0838 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0839 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0840 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0841 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0842 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0843 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0844 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0845 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0846 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0847 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0848 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0849 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0850 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0851 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0852 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0853 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0854 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0855 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0856 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0857 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0858 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0859 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0860 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0861 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0862 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0863 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0864 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0865 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0866 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0867 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0868 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0869 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0870 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0871 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0872 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0873 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0874 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0875 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0876 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0877 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0878 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0879 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0880 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0881 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0882 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0883 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0884 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0885 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0886 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0887 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0888 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0889 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0890 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0891 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0892 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0893 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0894 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0895 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0896 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0897 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0898 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0899 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0900 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0901 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0902 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0903 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0904 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0905 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0906 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0907 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0908 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0909 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0910 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0911 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0912 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0913 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0914 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0915 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0916 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0917 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0918 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0919 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0920 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0921 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0922 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0923 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0924 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0925 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0926 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0927 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0928 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0929 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0930 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0931 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0932 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0933 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0934 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0935 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0936 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0937 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0938 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0939 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0940 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0941 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0942 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0943 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0944 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0945 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0946 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0947 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0948 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0949 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0950 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0951 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0952 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0953 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0954 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0955 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0956 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0957 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0958 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0959 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0960 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0961 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0962 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0963 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0964 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0965 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0966 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0967 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0968 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0969 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0970 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0971 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0972 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0973 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0974 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0975 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0976 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0994 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0998 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP + diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go new file mode 100644 index 0000000000..7e81fcc892 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -0,0 +1,243 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "k8s.io/klog/v2" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// DefaultTrackOnCreateProbability defines the default probability that the field management of an object +// starts being tracked from the object's creation, instead of from the first time the object is applied to. +const DefaultTrackOnCreateProbability float32 = 1 + +var atMostEverySecond = internal.NewAtMostEvery(time.Second) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} + +// FieldManager updates the managed fields and merge applied +// configurations. 
+type FieldManager struct { + fieldManager Manager + ignoreManagedFieldsFromRequestObject bool +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. +func NewFieldManager(f Manager, ignoreManagedFieldsFromRequestObject bool) *FieldManager { + return &FieldManager{fieldManager: f, ignoreManagedFieldsFromRequestObject: ignoreManagedFieldsFromRequestObject} +} + +// NewDefaultFieldManager creates a new FieldManager that merges apply requests +// and update managed fields for other types of requests. +func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, ignoreManagedFieldsFromRequestObject bool) (*FieldManager, error) { + f, err := NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return newDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, ignoreManagedFieldsFromRequestObject), nil +} + +// NewDefaultCRDFieldManager creates a new FieldManager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, ignoreManagedFieldsFromRequestObject bool) (_ *FieldManager, err error) { + f, err := NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return newDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, ignoreManagedFieldsFromRequestObject), nil +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func newDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, ignoreManagedFieldsFromRequestObject bool) *FieldManager { + f = NewStripMetaManager(f) + f = NewManagedFieldsUpdater(f) + f = NewBuildManagerInfoManager(f, kind.GroupVersion()) + f = NewCapManagersManager(f, DefaultMaxUpdateManagers) + f = NewProbabilisticSkipNonAppliedManager(f, objectCreater, kind, DefaultTrackOnCreateProbability) + f = NewLastAppliedManager(f, typeConverter, objectConverter, kind.GroupVersion()) + f = NewLastAppliedUpdater(f) + + return NewFieldManager(f, ignoreManagedFieldsFromRequestObject) +} + +func decodeLiveManagedFields(liveObj runtime.Object) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + managed, err := internal.DecodeObjectManagedFields(liveAccessor.GetManagedFields()) + if err != nil { + return internal.NewEmptyManaged(), nil + } + return managed, nil +} + +func decodeManagedFields(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. 
+ if ignoreManagedFieldsFromRequestObject { + return decodeLiveManagedFields(liveObj) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return internal.NewEmptyManaged(), nil + } + + managed, err := internal.DecodeObjectManagedFields(newAccessor.GetManagedFields()) + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. + if err != nil || len(managed.Fields()) == 0 { + return decodeLiveManagedFields(liveObj) + } + + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + managed, err := decodeManagedFields(liveObj, newObj, f.ignoreManagedFieldsFromRequestObject) + if err != nil { + return newObj, nil + } + + internal.RemoveObjectManagedFields(liveObj) + internal.RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = internal.EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. +func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + klog.Errorf("[SHOULD NOT HAPPEN] failed to update managedFields for %v: %v", + newObj.GetObjectKind().GroupVersionKind(), + err) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + internal.RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. 
+ managed, err := internal.DecodeObjectManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + internal.RemoveObjectManagedFields(liveObj) + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, internal.NewConflictError(conflicts) + } + return nil, err + } + + if err = internal.EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go new file mode 100644 index 0000000000..b75ef7416e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "sync" + "time" +) + +// AtMostEvery will never run the method more than once every specified +// duration. +type AtMostEvery struct { + delay time.Duration + lastCall time.Time + mutex sync.Mutex +} + +// NewAtMostEvery creates a new AtMostEvery, that will run the method at +// most every given duration. +func NewAtMostEvery(delay time.Duration) *AtMostEvery { + return &AtMostEvery{ + delay: delay, + } +} + +// updateLastCall returns true if the lastCall time has been updated, +// false if it was too early. +func (s *AtMostEvery) updateLastCall() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + if time.Since(s.lastCall) < s.delay { + return false + } + s.lastCall = time.Now() + return true +} + +// Do will run the method if enough time has passed, and return true. +// Otherwise, it does nothing and returns false. +func (s *AtMostEvery) Do(fn func()) bool { + if !s.updateLastCall() { + return false + } + fn() + return true +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go new file mode 100644 index 0000000000..cfa19d8d9a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// NewConflictError returns an error including details on the requests apply conflicts +func NewConflictError(conflicts merge.Conflicts) *errors.StatusError { + causes := []metav1.StatusCause{} + for _, conflict := range conflicts { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeFieldManagerConflict, + Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)), + Field: conflict.Path.String(), + }) + } + return errors.NewApplyConflict(causes, getConflictMessage(conflicts)) +} + +func getConflictMessage(conflicts merge.Conflicts) string { + if len(conflicts) == 1 { + return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path) + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + uniqueManagers := []string{} + for manager := range m { + uniqueManagers = append(uniqueManagers, manager) + } + + // Print conflicts by sorted managers. + sort.Strings(uniqueManagers) + + messages := []string{} + for _, manager := range uniqueManagers { + messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager))) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n")) +} + +func printManager(manager string) string { + encodedManager := &metav1.ManagedFieldsEntry{} + if err := json.Unmarshal([]byte(manager), encodedManager); err != nil { + return fmt.Sprintf("%q", manager) + } + if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate { + if encodedManager.Time == nil { + return fmt.Sprintf("%q using %v", encodedManager.Manager, encodedManager.APIVersion) + } + return fmt.Sprintf("%q using %v at %v", encodedManager.Manager, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339)) + } + return fmt.Sprintf("%q", encodedManager.Manager) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go new file mode 100644 index 0000000000..08186191a7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// EmptyFields represents a set with no paths +// It looks like metav1.Fields{Raw: []byte("{}")} +var EmptyFields = func() metav1.FieldsV1 { + f, err := SetToFields(*fieldpath.NewSet()) + if err != nil { + panic("should never happen") + } + return f +}() + +// FieldsToSet creates a set paths from an input trie of fields +func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) { + err = s.FromJSON(bytes.NewReader(f.Raw)) + return s, err +} + +// SetToFields creates a trie of fields from an input set of paths +func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) { + f.Raw, err = s.ToJSON() + return f, err +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go new file mode 100644 index 0000000000..f8abcaf04e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// GvkParser contains a Parser that allows introspecting the schema. +type GvkParser struct { + gvks map[schema.GroupVersionKind]string + parser typed.Parser +} + +// Type returns a helper which can produce objects of the given type. Any +// errors are deferred until a further function is called. +func (p *GvkParser) Type(gvk schema.GroupVersionKind) *typed.ParseableType { + typeName, ok := p.gvks[gvk] + if !ok { + return nil + } + t := p.parser.Type(typeName) + return &t +} + +// NewGVKParser builds a GVKParser from a proto.Models. This +// will automatically find the proper version of the object, and the +// corresponding schema information. 
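    (Editor's illustrative sketch, not part of the vendored file.) The constructor below builds the parser from a proto.Models; as a hedged example of the lookup flow it enables inside this internal package, a hypothetical helper might resolve the schema type for a GroupVersionKind like this. The helper name exampleLookup, the chosen GVK, and the assumption that `models` comes from the cluster's parsed OpenAPI document are all illustrative.

        // Illustrative only: resolve the schema type for a GVK via GvkParser.
        // `models` is assumed to be built from the server's OpenAPI document.
        func exampleLookup(models proto.Models) (*typed.ParseableType, error) {
            parser, err := NewGVKParser(models, false /* preserveUnknownFields */)
            if err != nil {
                return nil, err
            }
            gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
            pt := parser.Type(gvk)
            if pt == nil {
                return nil, fmt.Errorf("no schema known for %v", gvk)
            }
            return pt, nil
        }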
+func NewGVKParser(models proto.Models, preserveUnknownFields bool) (*GvkParser, error) { + typeSchema, err := schemaconv.ToSchemaWithPreserveUnknownFields(models, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to convert models to schema: %v", err) + } + parser := GvkParser{ + gvks: map[schema.GroupVersionKind]string{}, + } + parser.parser = typed.Parser{Schema: *typeSchema} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName)) + } + gvkList := parseGroupVersionKind(model) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + _, ok := parser.gvks[gvk] + if ok { + return nil, fmt.Errorf("duplicate entry for %v", gvk) + } + parser.gvks[gvk] = modelName + } + } + } + return &parser, nil +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go new file mode 100644 index 0000000000..9a625e2acf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go @@ -0,0 +1,244 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. 
+ Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. +func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewEmptyManaged creates an empty ManagedInterface. +func NewEmptyManaged() ManagedInterface { + return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{}) +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } +} + +// RemoveObjectManagedFields removes the ManagedFields from the object +// before we merge so that it doesn't appear in the ManagedFields +// recursively. +func RemoveObjectManagedFields(obj runtime.Object) { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + accessor.SetManagedFields(nil) +} + +// DecodeObjectManagedFields extracts and converts the objects ManagedFields into a fieldpath.ManagedFields. +func DecodeObjectManagedFields(from []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + managed, err := decodeManagedFields(from) + if err != nil { + return nil, fmt.Errorf("failed to convert managed fields from API: %v", err) + } + return &managed, nil +} + +// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + encodedManagedFields, err := encodeManagedFields(managed) + if err != nil { + return fmt.Errorf("failed to convert back managed fields to API: %v", err) + } + accessor.SetManagedFields(encodedManagedFields) + + return nil +} + +// decodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func decodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (managed managedStruct, err error) { + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. 
+ case "": + return managedStruct{}, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return managedStruct{}, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } + manager, err := BuildManagerIdentifier(&encodedVersionedSet) + if err != nil { + return managedStruct{}, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + } + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + if err != nil { + return managedStruct{}, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + } + managed.times[manager] = encodedVersionedSet.Time + } + return managed, nil +} + +// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry +func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) { + encodedManagerCopy := *encodedManager + + // Never include fields type in the manager identifier + encodedManagerCopy.FieldsType = "" + + // Never include the fields in the manager identifier + encodedManagerCopy.FieldsV1 = nil + + // Never include the time in the manager identifier + encodedManagerCopy.Time = nil + + // For appliers, don't include the APIVersion in the manager identifier, + // so it will always have the same manager identifier each time it applied. + if encodedManager.Operation == metav1.ManagedFieldsOperationApply { + encodedManagerCopy.APIVersion = "" + } + + // Use the remaining fields to build the manager identifier + b, err := json.Marshal(&encodedManagerCopy) + if err != nil { + return "", fmt.Errorf("error marshalling manager identifier: %v", err) + } + + return string(b), nil +} + +func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) { + fields := EmptyFields + if encodedVersionedSet.FieldsV1 != nil { + fields = *encodedVersionedSet.FieldsV1 + } + set, err := FieldsToSet(fields) + if err != nil { + return nil, fmt.Errorf("error decoding set: %v", err) + } + return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil +} + +// encodeManagedFields converts ManagedFields from the format used by +// sigs.k8s.io/structured-merge-diff to the wire format (api format) +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } + encodedManagedFields = []metav1.ManagedFieldsEntry{} + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] + v, err := encodeManagerVersionedSet(manager, versionedSet) + if err != nil { + return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) + } + if t, ok := managed.Times()[manager]; ok { + v.Time = t + } + encodedManagedFields = append(encodedManagedFields, *v) + } + return sortEncodedManagedFields(encodedManagedFields) +} + +func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) { + sort.Slice(encodedManagedFields, func(i, j int) bool { + p, q := encodedManagedFields[i], encodedManagedFields[j] + + if p.Operation != q.Operation { + return p.Operation < q.Operation + } + + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() + } + if q.Time != nil { + qSeconds = q.Time.Unix() + } + if pSeconds != 
qSeconds { + return pSeconds < qSeconds + } + + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + return p.APIVersion < q.APIVersion + }) + + return encodedManagedFields, nil +} + +func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) { + encodedVersionedSet = &metav1.ManagedFieldsEntry{} + + // Get as many fields as we can from the manager identifier + err = json.Unmarshal([]byte(manager), encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err) + } + + // Get the APIVersion, Operation, and Fields from the VersionedSet + encodedVersionedSet.APIVersion = string(versionedSet.APIVersion()) + if versionedSet.Applied() { + encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply + } + encodedVersionedSet.FieldsType = "FieldsV1" + fields, err := SetToFields(*versionedSet.Set()) + if err != nil { + return nil, fmt.Errorf("error encoding set: %v", err) + } + encodedVersionedSet.FieldsV1 = &fields + + return encodedVersionedSet, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go new file mode 100644 index 0000000000..1954d65d32 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +const ( + // Field indicates that the content of this path element is a field's name + Field = "f" + + // Value indicates that the content of this path element is a field's value + Value = "v" + + // Index indicates that the content of this path element is an index in an array + Index = "i" + + // Key indicates that the content of this path element is a key value map + Key = "k" + + // Separator separates the type of a path element from the contents + Separator = ":" +) + +// NewPathElement parses a serialized path element +func NewPathElement(s string) (fieldpath.PathElement, error) { + split := strings.SplitN(s, Separator, 2) + if len(split) < 2 { + return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch split[0] { + case Field: + return fieldpath.PathElement{ + FieldName: &split[1], + }, nil + case Value: + val, err := value.FromJSON([]byte(split[1])) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Value: &val, + }, nil + case Index: + i, err := strconv.Atoi(split[1]) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Index: &i, + }, nil + case Key: + kv := map[string]json.RawMessage{} + err := json.Unmarshal([]byte(split[1]), &kv) + if err != nil { + return fieldpath.PathElement{}, err + } + fields := value.FieldList{} + for k, v := range kv { + b, err := json.Marshal(v) + if err != nil { + return fieldpath.PathElement{}, err + } + val, err := value.FromJSON(b) + if err != nil { + return fieldpath.PathElement{}, err + } + + fields = append(fields, value.Field{ + Name: k, + Value: val, + }) + } + return fieldpath.PathElement{ + Key: &fields, + }, nil + default: + // Ignore unknown key types + return fieldpath.PathElement{}, nil + } +} + +// PathElementString serializes a path element +func PathElementString(pe fieldpath.PathElement) (string, error) { + switch { + case pe.FieldName != nil: + return Field + Separator + *pe.FieldName, nil + case pe.Key != nil: + kv := map[string]json.RawMessage{} + for _, k := range *pe.Key { + b, err := value.ToJSON(k.Value) + if err != nil { + return "", err + } + m := json.RawMessage{} + err = json.Unmarshal(b, &m) + if err != nil { + return "", err + } + kv[k.Name] = m + } + b, err := json.Marshal(kv) + if err != nil { + return "", err + } + return Key + ":" + string(b), nil + case pe.Value != nil: + b, err := value.ToJSON(*pe.Value) + if err != nil { + return "", err + } + return Value + ":" + string(b), nil + case pe.Index != nil: + return Index + ":" + strconv.Itoa(*pe.Index), nil + default: + return "", errors.New("Invalid type of path element") + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go new file mode 100644 index 0000000000..4b07d462a2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go @@ -0,0 +1,172 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "encoding/json" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type lastAppliedManager struct { + fieldManager Manager + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + groupVersion schema.GroupVersion +} + +var _ Manager = &lastAppliedManager{} + +// NewLastAppliedManager converts the client-side apply annotation to +// server-side apply managed fields +func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager { + return &lastAppliedManager{ + fieldManager: fieldManager, + typeConverter: typeConverter, + objectConverter: objectConverter, + groupVersion: groupVersion, + } +} + +// Update implements Manager. +func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply will consider the last-applied annotation +// for upgrading an object managed by client-side apply to server-side apply +// without conflicts. +func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + // Upgrade the client-side apply annotation only from kubectl server-side-apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ if manager != "kubectl" { + return newLiveObj, newManaged, newErr + } + + // Check if we have conflicts + if newErr == nil { + return newLiveObj, newManaged, newErr + } + conflicts, ok := newErr.(merge.Conflicts) + if !ok { + return newLiveObj, newManaged, newErr + } + conflictSet := conflictsToSet(conflicts) + + // Check if conflicts are allowed due to client-side apply, + // and if so, then force apply + allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj) + if err != nil { + return newLiveObj, newManaged, newErr + } + if !conflictSet.Difference(allowedConflictSet).Empty() { + newConflicts := conflictsDifference(conflicts, allowedConflictSet) + return newLiveObj, newManaged, newConflicts + } + + return f.fieldManager.Apply(liveObj, newObj, managed, manager, true) +} + +func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) { + var accessor, err = meta.Accessor(liveObj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // If there is no client-side apply annotation, then there is nothing to do + var annotations = accessor.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("no last applied annotation") + } + var lastApplied, ok = annotations[corev1.LastAppliedConfigAnnotation] + if !ok || lastApplied == "" { + return nil, fmt.Errorf("no last applied annotation") + } + + liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to typed: %v", err) + } + + var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}} + err = json.Unmarshal([]byte(lastApplied), lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied) + } + + if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() { + return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err) + } + + lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to convert last applied to typed: %v", err) + } + + lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err) + } + + comparison, err := lastAppliedObjTyped.Compare(liveObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err) + } + + // Remove fields in last applied that are different, added, or missing in + // the live object. + // Because last-applied fields don't match the live object fields, + // then we don't own these fields. + lastAppliedObjFieldSet = lastAppliedObjFieldSet. + Difference(comparison.Modified). + Difference(comparison.Added). 
+ Difference(comparison.Removed) + + return lastAppliedObjFieldSet, nil +} + +// TODO: replace with merge.Conflicts.ToSet() +func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set { + conflictSet := fieldpath.NewSet() + for _, conflict := range []merge.Conflict(conflicts) { + conflictSet.Insert(conflict.Path) + } + return conflictSet +} + +func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts { + newConflicts := []merge.Conflict{} + for _, conflict := range []merge.Conflict(conflicts) { + if !s.Has(conflict.Path) { + newConflicts = append(newConflicts, conflict) + } + } + return newConflicts +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go new file mode 100644 index 0000000000..91e2e96914 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go @@ -0,0 +1,117 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type lastAppliedUpdater struct { + fieldManager Manager +} + +var _ Manager = &lastAppliedUpdater{} + +// NewLastAppliedUpdater sets the client-side apply annotation up to date with +// server-side apply managed fields +func NewLastAppliedUpdater(fieldManager Manager) Manager { + return &lastAppliedUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// server-side apply managed fields +func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + if err != nil { + return liveObj, managed, err + } + + // Sync the client-side apply annotation only from kubectl server-side apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ // + // If the client-side apply annotation doesn't exist, + // then continue because we have no annotation to update + if manager == "kubectl" && hasLastApplied(liveObj) { + lastAppliedValue, err := buildLastApplied(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) + } + err = setLastApplied(liveObj, lastAppliedValue) + if err != nil { + return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) + } + } + return liveObj, managed, err +} + +func hasLastApplied(obj runtime.Object) bool { + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + return false + } + _, ok := annotations[corev1.LastAppliedConfigAnnotation] + return ok +} + +func setLastApplied(obj runtime.Object, value string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[corev1.LastAppliedConfigAnnotation] = value + accessor.SetAnnotations(annotations) + return nil +} + +func buildLastApplied(obj runtime.Object) (string, error) { + obj = obj.DeepCopyObject() + + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // Remove the annotation from the object before encoding the object + var annotations = accessor.GetAnnotations() + delete(annotations, corev1.LastAppliedConfigAnnotation) + accessor.SetAnnotations(annotations) + + lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err) + } + return string(lastApplied), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go new file mode 100644 index 0000000000..9bee82a854 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type managedFieldsUpdater struct { + fieldManager Manager +} + +var _ Manager = &managedFieldsUpdater{} + +// NewManagedFieldsUpdater is responsible for updating the managedfields +// in the object, updating the time of the operation as necessary. For +// updates, it uses a hard-coded manager to detect if things have +// changed, and swaps back the correct manager after the operation is +// done. 
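    (Editor's illustrative sketch, not part of the vendored file.) The constructor below wraps another Manager, and the wrappers in this package compose as decorators. A hedged sketch of one possible composition follows; it is not necessarily the apiserver's actual default chain, the dependencies are passed in as parameters, and imports are elided.

        // Hypothetical wiring of the Manager decorators in this package; the
        // innermost manager performs the structured merge, each wrapper adds
        // one concern on top of it.
        func exampleChain(tc TypeConverter, oc runtime.ObjectConvertor, od runtime.ObjectDefaulter, gv, hub schema.GroupVersion) (Manager, error) {
            m, err := NewStructuredMergeManager(tc, oc, od, gv, hub)
            if err != nil {
                return nil, err
            }
            m = NewStripMetaManager(m)     // drop metadata/typemeta paths from managed field sets
            m = NewManagedFieldsUpdater(m) // maintain per-manager operation timestamps
            m = NewLastAppliedUpdater(m)   // keep kubectl's last-applied annotation in sync
            return m, nil
        }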
+func NewManagedFieldsUpdater(fieldManager Manager) Manager { + return &managedFieldsUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + self := "current-operation" + formerSet := managed.Fields()[manager] + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self) + if err != nil { + return object, managed, err + } + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + // Update the time only if the manager's fieldSet has changed. + if formerSet == nil || !managed.Fields()[manager].Set().Equals(formerSet.Set()) { + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + } + } + + return object, managed, nil +} + +// Apply implements Manager. +func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + formerManaged := managed.Fields().Copy() + object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) + if err != nil { + return object, managed, err + } + if object != nil || !managed.Fields().Equals(formerManaged) { + managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} + } + if object == nil { + object = liveObj + } + return object, managed, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml new file mode 100644 index 0000000000..13a14cf449 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml @@ -0,0 +1,259 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + 
lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + message: kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status. 
AppArmor enabled + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - k8s.gcr.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - k8s.gcr.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - k8s.gcr.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - k8s.gcr.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - names: + - k8s.gcr.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - k8s.gcr.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - k8s.gcr.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - k8s.gcr.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - k8s.gcr.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - k8s.gcr.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - k8s.gcr.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - k8s.gcr.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - k8s.gcr.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - k8s.gcr.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - k8s.gcr.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - k8s.gcr.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - k8s.gcr.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - k8s.gcr.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - k8s.gcr.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - k8s.gcr.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - k8s.gcr.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - k8s.gcr.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - k8s.gcr.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + 
- k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - k8s.gcr.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - k8s.gcr.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - k8s.gcr.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - k8s.gcr.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - k8s.gcr.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - k8s.gcr.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml new file mode 100644 index 0000000000..3fb0877d67 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: 
+ - args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go new file mode 100644 index 0000000000..a8c34ad652 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldmanager + +import ( + "fmt" + "math/rand" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + gvk schema.GroupVersionKind + beforeApplyManagerName string + probability float32 +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, gvk, 0.0) +} + +// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, +// or starts tracking on create with p probability. +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind, p float32) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + gvk: gvk, + beforeApplyManagerName: "before-first-apply", + probability: p, + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + accessor, err := meta.Accessor(liveObj) + if err != nil { + return newObj, managed, nil + } + + // If managed fields is empty, we need to determine whether to skip tracking managed fields. + if len(managed.Fields()) == 0 { + // Check if the operation is a create, by checking whether lastObj's UID is empty. + // If the operation is create, P(tracking managed fields) = f.probability + // If the operation is update, skip tracking managed fields, since we already know managed fields is empty. + if len(accessor.GetUID()) == 0 { + if f.probability <= rand.Float32() { + return newObj, managed, nil + } + } else { + return newObj, managed, nil + } + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + emptyObj, err := f.objectCreater.New(f.gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go new file mode 100644 index 0000000000..1460d9c802 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type stripMetaManager struct { + fieldManager Manager + + // stripSet is the list of fields that should never be part of a mangedFields. + stripSet *fieldpath.Set +} + +var _ Manager = &stripMetaManager{} + +// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset. +func NewStripMetaManager(fieldManager Manager) Manager { + return &stripMetaManager{ + fieldManager: fieldManager, + stripSet: fieldpath.NewSet( + fieldpath.MakePathOrDie("apiVersion"), + fieldpath.MakePathOrDie("kind"), + fieldpath.MakePathOrDie("metadata"), + fieldpath.MakePathOrDie("metadata", "name"), + fieldpath.MakePathOrDie("metadata", "namespace"), + fieldpath.MakePathOrDie("metadata", "creationTimestamp"), + fieldpath.MakePathOrDie("metadata", "selfLink"), + fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), + fieldpath.MakePathOrDie("metadata", "resourceVersion"), + ), + } +} + +// Update implements Manager. +func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// stripFields removes a predefined set of paths found in typed from managed +func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) { + vs, ok := managed[manager] + if ok { + if vs == nil { + panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) + } + newSet := vs.Set().Difference(f.stripSet) + if newSet.Empty() { + delete(managed, manager) + } else { + managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go new file mode 100644 index 0000000000..216a39cf7a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type structuredMergeManager struct { + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion) (Manager, error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion) (_ Manager, err error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + }, + }, nil +} + +// Update implements Manager. 
+func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", newObjVersioned.GetObjectKind().GroupVersionKind(), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", liveObjVersioned.GetObjectKind().GroupVersionKind(), err) + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields: %v", err) + } + managed = internal.NewManaged(managedFields, managed.Times()) + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + // Check that the patch object has the same version as the live object + if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. 
"+ + "Specified patch version: %s, expected: %s", + patchVersion, f.groupVersion)) + } + + patchObjMeta, err := meta.Accessor(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("couldn't get accessor: %v", err) + } + if patchObjMeta.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("metadata.managedFields must be nil")) + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + } + + patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object: %v", err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object: %v", err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + return nil, nil, err + } + managed = internal.NewManaged(managedFields, managed.Times()) + + if newObjTyped == nil { + return nil, managed, nil + } + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object to object: %v", err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned: %v", err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go new file mode 100644 index 0000000000..8bb2f0c9eb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. 
+type TypeConverter interface { + ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +// +// Note that this is not going to be sufficient for converting to/from +// CRDs that have a schema defined (we don't support that schema yet). +// TODO(jennybuckley): Use the schema provided by a CRD if it exists. +type DeducedTypeConverter struct{} + +var _ TypeConverter = DeducedTypeConverter{} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + default: + return typed.DeducedParseableType.FromStructured(obj) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. +func (DeducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type typeConverter struct { + parser *internal.GvkParser +} + +var _ TypeConverter = &typeConverter{} + +// NewTypeConverter builds a TypeConverter from a proto.Models. This +// will automatically find the proper version of the object, and the +// corresponding schema information. +func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConverter, error) { + parser, err := internal.NewGVKParser(models, preserveUnknownFields) + if err != nil { + return nil, err + } + return &typeConverter{parser: parser}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser.Type(gvk) + if t == nil { + return nil, newNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent()) + default: + return t.FromStructured(obj) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func newNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go new file mode 100644 index 0000000000..477e92f796 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go @@ -0,0 +1,101 
@@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// versionConverter is an implementation of +// sigs.k8s.io/structured-merge-diff/merge.Converter +type versionConverter struct { + typeConverter TypeConverter + objectConvertor runtime.ObjectConvertor + hubGetter func(from schema.GroupVersion) schema.GroupVersion +} + +var _ merge.Converter = &versionConverter{} + +// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor. +func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return schema.GroupVersion{ + Group: from.Group, + Version: h.Version, + } + }, + } +} + +// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor. +func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return h + }, + } +} + +// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter +func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) { + // Convert the smd typed value to a kubernetes object. + objectToConvert, err := v.typeConverter.TypedToObject(object) + if err != nil { + return object, err + } + + // Parse the target groupVersion. + groupVersion, err := schema.ParseGroupVersion(string(version)) + if err != nil { + return object, err + } + + // If attempting to convert to the same version as we already have, just return it. + fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion() + if fromVersion == groupVersion { + return object, nil + } + + // Convert to internal + internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion)) + if err != nil { + return object, err + } + + // Convert the object into the target version + convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion) + if err != nil { + return object, err + } + + // Convert the object back to a smd typed value and return it. 
+ return v.typeConverter.ObjectToTyped(convertedObject) +} + +// IsMissingVersionError +func (v *versionConverter) IsMissingVersionError(err error) bool { + return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go new file mode 100644 index 0000000000..c3f6e4cbe1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -0,0 +1,287 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + "k8s.io/apimachinery/pkg/runtime/schema" + "math/rand" + "net/http" + "net/url" + "strings" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + utiltrace "k8s.io/utils/trace" +) + +// getterFunc performs a get request with the given context and object name. The request +// may be used to deserialize an options object to pass to the getter. +type getterFunc func(ctx context.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) + +// getResourceHandler is an HTTP handler function for get requests. It delegates to the +// passed-in getterFunc to perform the actual get. +func getResourceHandler(scope *RequestScope, getter getterFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + trace := utiltrace.New("Get", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx := req.Context() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + result, err := getter(ctx, name, req, trace) + if err != nil { + scope.err(err, w, req) + return + } + + trace.Step("About to write a response") + transformResponseObject(ctx, scope, trace, req, w, http.StatusOK, outputMediaType, result) + trace.Step("Transformed response object") + } +} + +// GetResource returns a function that handles retrieving a single resource from a rest.Storage object. 
+func GetResource(r rest.Getter, e rest.Exporter, scope *RequestScope) http.HandlerFunc { + return getResourceHandler(scope, + func(ctx context.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { + // check for export + options := metav1.GetOptions{} + if values := req.URL.Query(); len(values) > 0 { + exports := metav1.ExportOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + if exports.Export { + if e == nil { + return nil, errors.NewBadRequest(fmt.Sprintf("export of %q is not supported", scope.Resource.Resource)) + } + return e.Export(ctx, name, exports) + } + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + } + if trace != nil { + trace.Step("About to Get from storage") + } + return r.Get(ctx, name, &options) + }) +} + +// GetResourceWithOptions returns a function that handles retrieving a single resource from a rest.Storage object. +func GetResourceWithOptions(r rest.GetterWithOptions, scope *RequestScope, isSubresource bool) http.HandlerFunc { + return getResourceHandler(scope, + func(ctx context.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { + opts, subpath, subpathKey := r.NewGetOptions() + trace.Step("About to process Get options") + if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + if trace != nil { + trace.Step("About to Get from storage") + } + return r.Get(ctx, name, opts) + }) +} + +// getRequestOptions parses out options and can include path information. The path information shouldn't include the subresource. +func getRequestOptions(req *http.Request, scope *RequestScope, into runtime.Object, subpath bool, subpathKey string, isSubresource bool) error { + if into == nil { + return nil + } + + query := req.URL.Query() + if subpath { + newQuery := make(url.Values) + for k, v := range query { + newQuery[k] = v + } + + ctx := req.Context() + requestInfo, _ := request.RequestInfoFrom(ctx) + startingIndex := 2 + if isSubresource { + startingIndex = 3 + } + + p := strings.Join(requestInfo.Parts[startingIndex:], "/") + + // ensure non-empty subpaths correctly reflect a leading slash + if len(p) > 0 && !strings.HasPrefix(p, "/") { + p = "/" + p + } + + // ensure subpaths correctly reflect the presence of a trailing slash on the original request + if strings.HasSuffix(requestInfo.Path, "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + newQuery[subpathKey] = []string{p} + query = newQuery + } + return scope.ParameterCodec.DecodeParameters(query, scope.Kind.GroupVersion(), into) +} + +func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatch bool, minRequestTimeout time.Duration) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("List", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + + namespace, err := scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + + // Watches for single objects are routed to this function. 
+ // Treat a name parameter the same as a field selector entry. + hasName := true + _, name, err := scope.Namer.Name(req) + if err != nil { + hasName = false + } + + ctx := req.Context() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + opts := metainternalversion.ListOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + + if errs := metainternalversionvalidation.ValidateListOptions(&opts); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) + scope.err(err, w, req) + return + } + + // transform fields + // TODO: DecodeParametersInto should do this. + if opts.FieldSelector != nil { + fn := func(label, value string) (newLabel, newValue string, err error) { + return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) + } + if opts.FieldSelector, err = opts.FieldSelector.Transform(fn); err != nil { + // TODO: allow bad request to set field causes based on query parameters + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + + if hasName { + // metadata.name is the canonical internal name. + // SelectionPredicate will notice that this is a request for + // a single object and optimize the storage query accordingly. + nameSelector := fields.OneTermEqualSelector("metadata.name", name) + + // Note that fieldSelector setting explicitly the "metadata.name" + // will result in reaching this branch (as the value of that field + // is propagated to requestInfo as the name parameter. + // That said, the allowed field selectors in this branch are: + // nil, fields.Everything and field selector matching metadata.name + // for our name. + if opts.FieldSelector != nil && !opts.FieldSelector.Empty() { + selectedName, ok := opts.FieldSelector.RequiresExactMatch("metadata.name") + if !ok || name != selectedName { + scope.err(errors.NewBadRequest("fieldSelector metadata.name doesn't match requested name"), w, req) + return + } + } else { + opts.FieldSelector = nameSelector + } + } + + if opts.Watch || forceWatch { + if rw == nil { + scope.err(errors.NewMethodNotSupported(scope.Resource.GroupResource(), "watch"), w, req) + return + } + // TODO: Currently we explicitly ignore ?timeout= and use only ?timeoutSeconds=. + timeout := time.Duration(0) + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + if timeout == 0 && minRequestTimeout > 0 { + timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) + } + klog.V(3).InfoS("Starting watch", "path", req.URL.Path, "resourceVersion", opts.ResourceVersion, "labels", opts.LabelSelector, "fields", opts.FieldSelector, "timeout", timeout) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + watcher, err := rw.Watch(ctx, &opts) + if err != nil { + scope.err(err, w, req) + return + } + requestInfo, _ := request.RequestInfoFrom(ctx) + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { + serveWatch(watcher, scope, outputMediaType, req, w, timeout) + }) + return + } + + // Log only long List requests (ignore Watch). 
+ defer trace.LogIfLong(500 * time.Millisecond) + trace.Step("About to List from storage") + result, err := r.List(ctx, &opts) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Listing from storage done") + + transformResponseObject(ctx, scope, trace, req, w, http.StatusOK, outputMediaType, result) + trace.Step("Writing http response done", utiltrace.Field{"count", meta.LenList(result)}) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go new file mode 100644 index 0000000000..82170e050e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +const ( + maxUserAgentLength = 1024 + userAgentTruncateSuffix = "...TRUNCATED" +) + +// lazyTruncatedUserAgent implements String() string and it will +// return user-agent which may be truncated. +type lazyTruncatedUserAgent struct { + req *http.Request +} + +func (lazy *lazyTruncatedUserAgent) String() string { + ua := "unknown" + if lazy.req != nil { + ua = utilnet.GetHTTPClient(lazy.req) + if len(ua) > maxUserAgentLength { + ua = ua[:maxUserAgentLength] + userAgentTruncateSuffix + } + } + return ua +} + +// LazyClientIP implements String() string and it will +// calls GetClientIP() lazily only when required. +type lazyClientIP struct { + req *http.Request +} + +func (lazy *lazyClientIP) String() string { + if lazy.req != nil { + if ip := utilnet.GetClientIP(lazy.req); ip != nil { + return ip.String() + } + } + return "unknown" +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go new file mode 100644 index 0000000000..755da22eea --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// ScopeNamer handles accessing names from requests and objects +type ScopeNamer interface { + // Namespace returns the appropriate namespace value from the request (may be empty) or an + // error. 
+ Namespace(req *http.Request) (namespace string, err error) + // Name returns the name from the request, and an optional namespace value if this is a namespace + // scoped call. An error is returned if the name is not available. + Name(req *http.Request) (namespace, name string, err error) + // ObjectName returns the namespace and name from an object if they exist, or an error if the object + // does not support names. + ObjectName(obj runtime.Object) (namespace, name string, err error) + // SetSelfLink sets the provided URL onto the object. The method should return nil if the object + // does not support selfLinks. + SetSelfLink(obj runtime.Object, url string) error + // GenerateLink creates an encoded URI for a given runtime object that represents the canonical path + // and query. + GenerateLink(requestInfo *request.RequestInfo, obj runtime.Object) (uri string, err error) + // GenerateListLink creates an encoded URI for a list that represents the canonical path and query. + GenerateListLink(req *http.Request) (uri string, err error) +} + +type ContextBasedNaming struct { + SelfLinker runtime.SelfLinker + ClusterScoped bool + + SelfLinkPathPrefix string + SelfLinkPathSuffix string +} + +// ContextBasedNaming implements ScopeNamer +var _ ScopeNamer = ContextBasedNaming{} + +func (n ContextBasedNaming) SetSelfLink(obj runtime.Object, url string) error { + return n.SelfLinker.SetSelfLink(obj, url) +} + +func (n ContextBasedNaming) Namespace(req *http.Request) (namespace string, err error) { + requestInfo, ok := request.RequestInfoFrom(req.Context()) + if !ok { + return "", fmt.Errorf("missing requestInfo") + } + return requestInfo.Namespace, nil +} + +func (n ContextBasedNaming) Name(req *http.Request) (namespace, name string, err error) { + requestInfo, ok := request.RequestInfoFrom(req.Context()) + if !ok { + return "", "", fmt.Errorf("missing requestInfo") + } + ns, err := n.Namespace(req) + if err != nil { + return "", "", err + } + + if len(requestInfo.Name) == 0 { + return "", "", errEmptyName + } + return ns, requestInfo.Name, nil +} + +// fastURLPathEncode encodes the provided path as a URL path +func fastURLPathEncode(path string) string { + for _, r := range []byte(path) { + switch { + case r >= '-' && r <= '9', r >= 'A' && r <= 'Z', r >= 'a' && r <= 'z': + // characters within this range do not require escaping + default: + var u url.URL + u.Path = path + return u.EscapedPath() + } + } + return path +} + +func (n ContextBasedNaming) GenerateLink(requestInfo *request.RequestInfo, obj runtime.Object) (uri string, err error) { + namespace, name, err := n.ObjectName(obj) + if err == errEmptyName && len(requestInfo.Name) > 0 { + name = requestInfo.Name + } else if err != nil { + return "", err + } + if len(namespace) == 0 && len(requestInfo.Namespace) > 0 { + namespace = requestInfo.Namespace + } + + if n.ClusterScoped { + return n.SelfLinkPathPrefix + url.QueryEscape(name) + n.SelfLinkPathSuffix, nil + } + + builder := strings.Builder{} + builder.Grow(len(n.SelfLinkPathPrefix) + len(namespace) + len(requestInfo.Resource) + len(name) + len(n.SelfLinkPathSuffix) + 8) + builder.WriteString(n.SelfLinkPathPrefix) + builder.WriteString(namespace) + builder.WriteByte('/') + builder.WriteString(requestInfo.Resource) + builder.WriteByte('/') + builder.WriteString(name) + builder.WriteString(n.SelfLinkPathSuffix) + return fastURLPathEncode(builder.String()), nil +} + +func (n ContextBasedNaming) GenerateListLink(req *http.Request) (uri string, err error) { + if len(req.URL.RawPath) > 0 { + 
return req.URL.RawPath, nil + } + return fastURLPathEncode(req.URL.Path), nil +} + +func (n ContextBasedNaming) ObjectName(obj runtime.Object) (namespace, name string, err error) { + name, err = n.SelfLinker.Name(obj) + if err != nil { + return "", "", err + } + if len(name) == 0 { + return "", "", errEmptyName + } + namespace, err = n.SelfLinker.Namespace(obj) + if err != nil { + return "", "", err + } + return namespace, name, err +} + +// errEmptyName is returned when API requests do not fill the name section of the path. +var errEmptyName = errors.NewBadRequest("name must be provided") diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go new file mode 100644 index 0000000000..80f4feb727 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package negotiation contains media type negotiation logic. +package negotiation // import "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go new file mode 100644 index 0000000000..86faf525df --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package negotiation + +import ( + "fmt" + "net/http" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// errNotAcceptable indicates Accept negotiation has failed +type errNotAcceptable struct { + accepted []string +} + +// NewNotAcceptableError returns an error of NotAcceptable which contains specified string +func NewNotAcceptableError(accepted []string) error { + return errNotAcceptable{accepted} +} + +func (e errNotAcceptable) Error() string { + return fmt.Sprintf("only the following media types are accepted: %v", strings.Join(e.accepted, ", ")) +} + +func (e errNotAcceptable) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReasonNotAcceptable, + Message: e.Error(), + } +} + +// errNotAcceptableConversion indicates Accept negotiation has failed specifically +// for a conversion to a known type. 
+type errNotAcceptableConversion struct { + target string + accepted []string +} + +// NewNotAcceptableConversionError returns an error indicating that the desired +// API transformation to the target group version kind string is not accepted and +// only the listed mime types are allowed. This is temporary while Table does not +// yet support protobuf encoding. +func NewNotAcceptableConversionError(target string, accepted []string) error { + return errNotAcceptableConversion{target, accepted} +} + +func (e errNotAcceptableConversion) Error() string { + return fmt.Sprintf("only the following media types are accepted when converting to %s: %v", e.target, strings.Join(e.accepted, ", ")) +} + +func (e errNotAcceptableConversion) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReasonNotAcceptable, + Message: e.Error(), + } +} + +// errUnsupportedMediaType indicates Content-Type is not recognized +type errUnsupportedMediaType struct { + accepted []string +} + +// NewUnsupportedMediaTypeError returns an error of UnsupportedMediaType which contains specified string +func NewUnsupportedMediaTypeError(accepted []string) error { + return errUnsupportedMediaType{accepted} +} + +func (e errUnsupportedMediaType) Error() string { + return fmt.Sprintf("the body of the request was in an unknown format - accepted media types include: %v", strings.Join(e.accepted, ", ")) +} + +func (e errUnsupportedMediaType) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnsupportedMediaType, + Reason: metav1.StatusReasonUnsupportedMediaType, + Message: e.Error(), + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go new file mode 100644 index 0000000000..9dbad1ea65 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -0,0 +1,263 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package negotiation + +import ( + "mime" + "net/http" + "strconv" + "strings" + + "github.com/munnerz/goautoneg" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// MediaTypesForSerializer returns a list of media and stream media types for the server. +func MediaTypesForSerializer(ns runtime.NegotiatedSerializer) (mediaTypes, streamMediaTypes []string) { + for _, info := range ns.SupportedMediaTypes() { + mediaTypes = append(mediaTypes, info.MediaType) + if info.StreamSerializer != nil { + // stream=watch is the existing mime-type parameter for watch + streamMediaTypes = append(streamMediaTypes, info.MediaType+";stream=watch") + } + } + return mediaTypes, streamMediaTypes +} + +// NegotiateOutputMediaType negotiates the output structured media type and a serializer, or +// returns an error. 
+func NegotiateOutputMediaType(req *http.Request, ns runtime.NegotiatedSerializer, restrictions EndpointRestrictions) (MediaTypeOptions, runtime.SerializerInfo, error) { + mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), ns.SupportedMediaTypes(), restrictions) + if !ok { + supported, _ := MediaTypesForSerializer(ns) + return mediaType, runtime.SerializerInfo{}, NewNotAcceptableError(supported) + } + // TODO: move into resthandler + info := mediaType.Accepted + if (mediaType.Pretty || isPrettyPrint(req)) && info.PrettySerializer != nil { + info.Serializer = info.PrettySerializer + } + return mediaType, info, nil +} + +// NegotiateOutputMediaTypeStream returns a stream serializer for the given request. +func NegotiateOutputMediaTypeStream(req *http.Request, ns runtime.NegotiatedSerializer, restrictions EndpointRestrictions) (runtime.SerializerInfo, error) { + mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), ns.SupportedMediaTypes(), restrictions) + if !ok || mediaType.Accepted.StreamSerializer == nil { + _, supported := MediaTypesForSerializer(ns) + return runtime.SerializerInfo{}, NewNotAcceptableError(supported) + } + return mediaType.Accepted, nil +} + +// NegotiateInputSerializer returns the input serializer for the provided request. +func NegotiateInputSerializer(req *http.Request, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { + mediaType := req.Header.Get("Content-Type") + return NegotiateInputSerializerForMediaType(mediaType, streaming, ns) +} + +// NegotiateInputSerializerForMediaType returns the appropriate serializer for the given media type or an error. +func NegotiateInputSerializerForMediaType(mediaType string, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { + mediaTypes := ns.SupportedMediaTypes() + if len(mediaType) == 0 { + mediaType = mediaTypes[0].MediaType + } + if mediaType, _, err := mime.ParseMediaType(mediaType); err == nil { + if info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType); ok { + return info, nil + } + } + + supported, streamingSupported := MediaTypesForSerializer(ns) + if streaming { + return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(streamingSupported) + } + return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(supported) +} + +// isPrettyPrint returns true if the "pretty" query parameter is true or if the User-Agent +// matches known "human" clients. +func isPrettyPrint(req *http.Request) bool { + // DEPRECATED: should be part of the content type + if req.URL != nil { + // avoid an allocation caused by parsing the URL query + if strings.Contains(req.URL.RawQuery, "pretty") { + pp := req.URL.Query().Get("pretty") + if len(pp) > 0 { + pretty, _ := strconv.ParseBool(pp) + return pretty + } + } + } + userAgent := req.UserAgent() + // This covers basic all browsers and cli http tools + if strings.HasPrefix(userAgent, "curl") || strings.HasPrefix(userAgent, "Wget") || strings.HasPrefix(userAgent, "Mozilla/5.0") { + return true + } + return false +} + +// EndpointRestrictions is an interface that allows content-type negotiation +// to verify server support for specific options +type EndpointRestrictions interface { + // AllowsMediaTypeTransform returns true if the endpoint allows either the requested mime type + // or the requested transformation. If false, the caller should ignore this mime type. If the + // target is nil, the client is not requesting a transformation. 
+ AllowsMediaTypeTransform(mimeType, mimeSubType string, target *schema.GroupVersionKind) bool + // AllowsServerVersion should return true if the specified version is valid + // for the server group. + AllowsServerVersion(version string) bool + // AllowsStreamSchema should return true if the specified stream schema is + // valid for the server group. + AllowsStreamSchema(schema string) bool +} + +// DefaultEndpointRestrictions is the default EndpointRestrictions which allows +// content-type negotiation to verify server support for specific options +var DefaultEndpointRestrictions = emptyEndpointRestrictions{} + +type emptyEndpointRestrictions struct{} + +func (emptyEndpointRestrictions) AllowsMediaTypeTransform(mimeType string, mimeSubType string, gvk *schema.GroupVersionKind) bool { + return gvk == nil +} +func (emptyEndpointRestrictions) AllowsServerVersion(string) bool { return false } +func (emptyEndpointRestrictions) AllowsStreamSchema(s string) bool { return s == "watch" } + +// MediaTypeOptions describes information for a given media type that may alter +// the server response +type MediaTypeOptions struct { + // pretty is true if the requested representation should be formatted for human + // viewing + Pretty bool + + // stream, if set, indicates that a streaming protocol variant of this encoding + // is desired. The only currently supported value is watch which returns versioned + // events. In the future, this may refer to other stream protocols. + Stream string + + // convert is a request to alter the type of object returned by the server from the + // normal response + Convert *schema.GroupVersionKind + // useServerVersion is an optional version for the server group + UseServerVersion string + + // export is true if the representation requested should exclude fields the server + // has set + Export bool + + // unrecognized is a list of all unrecognized keys + Unrecognized []string + + // the accepted media type from the client + Accepted runtime.SerializerInfo +} + +// acceptMediaTypeOptions returns an options object that matches the provided media type params. If +// it returns false, the provided options are not allowed and the media type must be skipped. These +// parameters are unversioned and may not be changed. +func acceptMediaTypeOptions(params map[string]string, accepts *runtime.SerializerInfo, endpoint EndpointRestrictions) (MediaTypeOptions, bool) { + var options MediaTypeOptions + + // extract all known parameters + for k, v := range params { + switch k { + + // controls transformation of the object when returned + case "as": + if options.Convert == nil { + options.Convert = &schema.GroupVersionKind{} + } + options.Convert.Kind = v + case "g": + if options.Convert == nil { + options.Convert = &schema.GroupVersionKind{} + } + options.Convert.Group = v + case "v": + if options.Convert == nil { + options.Convert = &schema.GroupVersionKind{} + } + options.Convert.Version = v + + // controls the streaming schema + case "stream": + if len(v) > 0 && (accepts.StreamSerializer == nil || !endpoint.AllowsStreamSchema(v)) { + return MediaTypeOptions{}, false + } + options.Stream = v + + // controls the version of the server API group used + // for generic output + case "sv": + if len(v) > 0 && !endpoint.AllowsServerVersion(v) { + return MediaTypeOptions{}, false + } + options.UseServerVersion = v + + // if specified, the server should transform the returned + // output and remove fields that are always server specified, + // or which fit the default behavior. 
+ case "export": + options.Export = v == "1" + + // if specified, the pretty serializer will be used + case "pretty": + options.Pretty = v == "1" + + default: + options.Unrecognized = append(options.Unrecognized, k) + } + } + + if !endpoint.AllowsMediaTypeTransform(accepts.MediaTypeType, accepts.MediaTypeSubType, options.Convert) { + return MediaTypeOptions{}, false + } + + options.Accepted = *accepts + return options, true +} + +// NegotiateMediaTypeOptions returns the most appropriate content type given the accept header and +// a list of alternatives along with the accepted media type parameters. +func NegotiateMediaTypeOptions(header string, accepted []runtime.SerializerInfo, endpoint EndpointRestrictions) (MediaTypeOptions, bool) { + if len(header) == 0 && len(accepted) > 0 { + return MediaTypeOptions{ + Accepted: accepted[0], + }, true + } + + clauses := goautoneg.ParseAccept(header) + for i := range clauses { + clause := &clauses[i] + for i := range accepted { + accepts := &accepted[i] + switch { + case clause.Type == accepts.MediaTypeType && clause.SubType == accepts.MediaTypeSubType, + clause.Type == accepts.MediaTypeType && clause.SubType == "*", + clause.Type == "*" && clause.SubType == "*": + if retVal, ret := acceptMediaTypeOptions(clause.Params, accepts, endpoint); ret { + return retVal, true + } + } + } + } + + return MediaTypeOptions{}, false +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go new file mode 100644 index 0000000000..096330a4ae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -0,0 +1,680 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +const ( + // maximum number of operations a single json patch may contain. + maxJSONPatchOperations = 10000 +) + +// PatchResource returns a function that will handle a resource patch. +func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interface, patchTypes []string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("Patch", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // Do this first, otherwise name extraction can fail for unrecognized content types + // TODO: handle this in negotiation + contentType := req.Header.Get("Content-Type") + // Remove "; charset=" if included in header. 
+ if idx := strings.Index(contentType, ";"); idx > 0 { + contentType = contentType[:idx] + } + patchType := types.PatchType(contentType) + + // Ensure the patchType is one we support + if !sets.NewString(patchTypes...).Has(contentType) { + scope.err(negotiation.NewUnsupportedMediaTypeError(patchTypes), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we + // document, move timeout out of this function and declare it in + // api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + patchBytes, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.PatchOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if errs := validation.ValidatePatchOptions(options, patchType); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "PatchOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("PatchOptions")) + + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + + audit.LogRequestPatch(ae, patchBytes) + trace.Step("Recorded the audit event") + + baseContentType := runtime.ContentTypeJSON + if patchType == types.ApplyPatchType { + baseContentType = runtime.ContentTypeYAML + } + s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), baseContentType) + if !ok { + scope.err(fmt.Errorf("no serializer defined for %v", baseContentType), w, req) + return + } + gv := scope.Kind.GroupVersion() + + codec := runtime.NewCodec( + scope.Serializer.EncoderForVersion(s.Serializer, gv), + scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion), + ) + + userInfo, _ := request.UserFrom(ctx) + staticCreateAttributes := admission.NewAttributesRecord( + nil, + nil, + scope.Kind, + namespace, + name, + scope.Resource, + scope.Subresource, + admission.Create, + patchToCreateOptions(options), + dryrun.IsDryRun(options.DryRun), + userInfo) + staticUpdateAttributes := admission.NewAttributesRecord( + nil, + nil, + scope.Kind, + namespace, + name, + scope.Resource, + scope.Subresource, + admission.Update, + patchToUpdateOptions(options), + dryrun.IsDryRun(options.DryRun), + userInfo, + ) + + mutatingAdmission, _ := admit.(admission.MutationInterface) + createAuthorizerAttributes := authorizer.AttributesRecord{ + User: userInfo, + ResourceRequest: true, + Path: req.URL.Path, + Verb: "create", + APIGroup: scope.Resource.Group, + APIVersion: scope.Resource.Version, + Resource: scope.Resource.Resource, + Subresource: scope.Subresource, + Namespace: namespace, + Name: name, + } + + p := patcher{ + namer: scope.Namer, + creater: scope.Creater, + defaulter: scope.Defaulter, + typer: scope.Typer, + unsafeConvertor: scope.UnsafeConvertor, + kind: scope.Kind, + resource: scope.Resource, + subresource: scope.Subresource, + dryRun: dryrun.IsDryRun(options.DryRun), + + 
objectInterfaces: scope, + + hubGroupVersion: scope.HubGroupVersion, + + createValidation: withAuthorization(rest.AdmissionToValidateObjectFunc(admit, staticCreateAttributes, scope), scope.Authorizer, createAuthorizerAttributes), + updateValidation: rest.AdmissionToValidateObjectUpdateFunc(admit, staticUpdateAttributes, scope), + admissionCheck: mutatingAdmission, + + codec: codec, + + timeout: timeout, + options: options, + + restPatcher: r, + name: name, + patchType: patchType, + patchBytes: patchBytes, + userAgent: req.UserAgent(), + + trace: trace, + } + + result, wasCreated, err := p.patchResource(ctx, scope) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + if err := setObjectSelfLink(ctx, result, req, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + trace.Step("Self-link added") + + status := http.StatusOK + if wasCreated { + status = http.StatusCreated + } + transformResponseObject(ctx, scope, trace, req, w, status, outputMediaType, result) + } +} + +type mutateObjectUpdateFunc func(ctx context.Context, obj, old runtime.Object) error + +// patcher breaks the process of patch application and retries into smaller +// pieces of functionality. +// TODO: Use builder pattern to construct this object? +// TODO: As part of that effort, some aspects of PatchResource above could be +// moved into this type. +type patcher struct { + // Pieces of RequestScope + namer ScopeNamer + creater runtime.ObjectCreater + defaulter runtime.ObjectDefaulter + typer runtime.ObjectTyper + unsafeConvertor runtime.ObjectConvertor + resource schema.GroupVersionResource + kind schema.GroupVersionKind + subresource string + dryRun bool + + objectInterfaces admission.ObjectInterfaces + + hubGroupVersion schema.GroupVersion + + // Validation functions + createValidation rest.ValidateObjectFunc + updateValidation rest.ValidateObjectUpdateFunc + admissionCheck admission.MutationInterface + + codec runtime.Codec + + timeout time.Duration + options *metav1.PatchOptions + + // Operation information + restPatcher rest.Patcher + name string + patchType types.PatchType + patchBytes []byte + userAgent string + + trace *utiltrace.Trace + + // Set at invocation-time (by applyPatch) and immutable thereafter + namespace string + updatedObjectInfo rest.UpdatedObjectInfo + mechanism patchMechanism + forceAllowCreate bool +} + +type patchMechanism interface { + applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) + createNewObject() (runtime.Object, error) +} + +type jsonPatcher struct { + *patcher + + fieldManager *fieldmanager.FieldManager +} + +func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) { + // Encode will convert & return a versioned object in JSON. + currentObjJS, err := runtime.Encode(p.codec, currentObject) + if err != nil { + return nil, err + } + + // Apply the patch. + patchedObjJS, err := p.applyJSPatch(currentObjJS) + if err != nil { + return nil, err + } + + // Construct the resulting typed, unversioned object. 
+ objToUpdate := p.restPatcher.New() + if err := runtime.DecodeInto(p.codec, patchedObjJS, objToUpdate); err != nil { + return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), string(patchedObjJS), err.Error()), + }) + } + + if p.fieldManager != nil { + objToUpdate = p.fieldManager.UpdateNoErrors(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent)) + } + return objToUpdate, nil +} + +func (p *jsonPatcher) createNewObject() (runtime.Object, error) { + return nil, errors.NewNotFound(p.resource.GroupResource(), p.name) +} + +// applyJSPatch applies the patch. Input and output objects must both have +// the external version, since that is what the patch must have been constructed against. +func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr error) { + switch p.patchType { + case types.JSONPatchType: + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + if len(p.patchBytes) > 1024*1024 { + v := []interface{}{} + if err := json.Unmarshal(p.patchBytes, &v); err != nil { + return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } + } + + patchObj, err := jsonpatch.DecodePatch(p.patchBytes) + if err != nil { + return nil, errors.NewBadRequest(err.Error()) + } + if len(patchObj) > maxJSONPatchOperations { + return nil, errors.NewRequestEntityTooLargeError( + fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d", + maxJSONPatchOperations, len(patchObj))) + } + patchedJS, err := patchObj.Apply(versionedJS) + if err != nil { + return nil, errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) + } + return patchedJS, nil + case types.MergePatchType: + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + if len(p.patchBytes) > 1024*1024 { + v := map[string]interface{}{} + if err := json.Unmarshal(p.patchBytes, &v); err != nil { + return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } + } + + return jsonpatch.MergePatch(versionedJS, p.patchBytes) + default: + // only here as a safety net - go-restful filters content-type + return nil, fmt.Errorf("unknown Content-Type header for patch: %v", p.patchType) + } +} + +type smpPatcher struct { + *patcher + + // Schema + schemaReferenceObj runtime.Object + fieldManager *fieldmanager.FieldManager +} + +func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) { + // Since the patch is applied on versioned objects, we need to convert the + // current object to versioned representation first. 
+	currentVersionedObject, err := p.unsafeConvertor.ConvertToVersion(currentObject, p.kind.GroupVersion())
+	if err != nil {
+		return nil, err
+	}
+	versionedObjToUpdate, err := p.creater.New(p.kind)
+	if err != nil {
+		return nil, err
+	}
+	if err := strategicPatchObject(p.defaulter, currentVersionedObject, p.patchBytes, versionedObjToUpdate, p.schemaReferenceObj); err != nil {
+		return nil, err
+	}
+	// Convert the object back to the hub version
+	newObj, err := p.unsafeConvertor.ConvertToVersion(versionedObjToUpdate, p.hubGroupVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	if p.fieldManager != nil {
+		newObj = p.fieldManager.UpdateNoErrors(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent))
+	}
+	return newObj, nil
+}
+
+func (p *smpPatcher) createNewObject() (runtime.Object, error) {
+	return nil, errors.NewNotFound(p.resource.GroupResource(), p.name)
+}
+
+type applyPatcher struct {
+	patch        []byte
+	options      *metav1.PatchOptions
+	creater      runtime.ObjectCreater
+	kind         schema.GroupVersionKind
+	fieldManager *fieldmanager.FieldManager
+	userAgent    string
+}
+
+func (p *applyPatcher) applyPatchToCurrentObject(obj runtime.Object) (runtime.Object, error) {
+	force := false
+	if p.options.Force != nil {
+		force = *p.options.Force
+	}
+	if p.fieldManager == nil {
+		panic("FieldManager must be installed to run apply")
+	}
+
+	patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
+	if err := yaml.Unmarshal(p.patch, &patchObj.Object); err != nil {
+		return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err))
+	}
+
+	return p.fieldManager.Apply(obj, patchObj, p.options.FieldManager, force)
+}
+
+func (p *applyPatcher) createNewObject() (runtime.Object, error) {
+	obj, err := p.creater.New(p.kind)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new object: %v", err)
+	}
+	return p.applyPatchToCurrentObject(obj)
+}
+
+// strategicPatchObject applies a strategic merge patch of <patchBytes> to
+// <originalObject> and stores the result in <objToUpdate>.
+// It additionally returns the map[string]interface{} representation of the
+// <originalObject> and <patchBytes>.
+// NOTE: Both <originalObject> and <objToUpdate> are supposed to be versioned.
+func strategicPatchObject(
+	defaulter runtime.ObjectDefaulter,
+	originalObject runtime.Object,
+	patchBytes []byte,
+	objToUpdate runtime.Object,
+	schemaReferenceObj runtime.Object,
+) error {
+	originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject)
+	if err != nil {
+		return err
+	}
+
+	patchMap := make(map[string]interface{})
+	if err := json.Unmarshal(patchBytes, &patchMap); err != nil {
+		return errors.NewBadRequest(err.Error())
+	}
+
+	if err := applyPatchToObject(defaulter, originalObjMap, patchMap, objToUpdate, schemaReferenceObj); err != nil {
+		return err
+	}
+	return nil
+}
+
+// applyPatch is called every time GuaranteedUpdate asks for the updated object,
+// and is given the currently persisted object as input.
+// TODO: rename this function because the name implies it is related to applyPatcher +func (p *patcher) applyPatch(_ context.Context, _, currentObject runtime.Object) (objToUpdate runtime.Object, patchErr error) { + // Make sure we actually have a persisted currentObject + p.trace.Step("About to apply patch") + currentObjectHasUID, err := hasUID(currentObject) + if err != nil { + return nil, err + } else if !currentObjectHasUID { + objToUpdate, patchErr = p.mechanism.createNewObject() + } else { + objToUpdate, patchErr = p.mechanism.applyPatchToCurrentObject(currentObject) + } + + if patchErr != nil { + return nil, patchErr + } + + objToUpdateHasUID, err := hasUID(objToUpdate) + if err != nil { + return nil, err + } + if objToUpdateHasUID && !currentObjectHasUID { + accessor, err := meta.Accessor(objToUpdate) + if err != nil { + return nil, err + } + return nil, errors.NewConflict(p.resource.GroupResource(), p.name, fmt.Errorf("uid mismatch: the provided object specified uid %s, and no existing object was found", accessor.GetUID())) + } + + if err := checkName(objToUpdate, p.name, p.namespace, p.namer); err != nil { + return nil, err + } + return objToUpdate, nil +} + +func (p *patcher) admissionAttributes(ctx context.Context, updatedObject runtime.Object, currentObject runtime.Object, operation admission.Operation, operationOptions runtime.Object) admission.Attributes { + userInfo, _ := request.UserFrom(ctx) + return admission.NewAttributesRecord(updatedObject, currentObject, p.kind, p.namespace, p.name, p.resource, p.subresource, operation, operationOptions, p.dryRun, userInfo) +} + +// applyAdmission is called every time GuaranteedUpdate asks for the updated object, +// and is given the currently persisted object and the patched object as input. 
+// TODO: rename this function because the name implies it is related to applyPatcher
+func (p *patcher) applyAdmission(ctx context.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) {
+ p.trace.Step("About to check admission control")
+ var operation admission.Operation
+ var options runtime.Object
+ if hasUID, err := hasUID(currentObject); err != nil {
+ return nil, err
+ } else if !hasUID {
+ operation = admission.Create
+ currentObject = nil
+ options = patchToCreateOptions(p.options)
+ } else {
+ operation = admission.Update
+ options = patchToUpdateOptions(p.options)
+ }
+ if p.admissionCheck != nil && p.admissionCheck.Handles(operation) {
+ attributes := p.admissionAttributes(ctx, patchedObject, currentObject, operation, options)
+ return patchedObject, p.admissionCheck.Admit(ctx, attributes, p.objectInterfaces)
+ }
+ return patchedObject, nil
+}
+
+// patchResource divides PatchResource for easier unit testing
+func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runtime.Object, bool, error) {
+ p.namespace = request.NamespaceValue(ctx)
+ switch p.patchType {
+ case types.JSONPatchType, types.MergePatchType:
+ p.mechanism = &jsonPatcher{
+ patcher: p,
+ fieldManager: scope.FieldManager,
+ }
+ case types.StrategicMergePatchType:
+ schemaReferenceObj, err := p.unsafeConvertor.ConvertToVersion(p.restPatcher.New(), p.kind.GroupVersion())
+ if err != nil {
+ return nil, false, err
+ }
+ p.mechanism = &smpPatcher{
+ patcher: p,
+ schemaReferenceObj: schemaReferenceObj,
+ fieldManager: scope.FieldManager,
+ }
+ // this case is unreachable if ServerSideApply is not enabled because we will have already rejected the content type
+ case types.ApplyPatchType:
+ p.mechanism = &applyPatcher{
+ fieldManager: scope.FieldManager,
+ patch: p.patchBytes,
+ options: p.options,
+ creater: p.creater,
+ kind: p.kind,
+ userAgent: p.userAgent,
+ }
+ p.forceAllowCreate = true
+ default:
+ return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType)
+ }
+ dedupOwnerReferencesTransformer := func(_ context.Context, obj, _ runtime.Object) (runtime.Object, error) {
+ // Dedup owner references after mutating admission happens
+ dedupOwnerReferencesAndAddWarning(obj, ctx, true)
+ return obj, nil
+ }
+
+ wasCreated := false
+ p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil, p.applyPatch, p.applyAdmission, dedupOwnerReferencesTransformer)
+ requestFunc := func() (runtime.Object, error) {
+ // Pass in UpdateOptions to override UpdateStrategy.AllowUpdateOnCreate
+ options := patchToUpdateOptions(p.options)
+ updateObject, created, updateErr := p.restPatcher.Update(ctx, p.name, p.updatedObjectInfo, p.createValidation, p.updateValidation, p.forceAllowCreate, options)
+ wasCreated = created
+ return updateObject, updateErr
+ }
+ result, err := finishRequest(p.timeout, func() (runtime.Object, error) {
+ result, err := requestFunc()
+ // If the object wasn't committed to storage because its serialized size was too large,
+ // it is safe to remove managedFields (which can be large) and try again.
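+ // Illustrative note (assumption, not in the original source): etcd rejects
+ // requests above its configured size limit (roughly 1.5 MiB by default), so
+ // dropping managedFields can bring an oversized object back under the limit.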
+ if isTooLargeError(err) && p.patchType != types.ApplyPatchType {
+ if _, accessorErr := meta.Accessor(p.restPatcher.New()); accessorErr == nil {
+ p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil,
+ p.applyPatch,
+ p.applyAdmission,
+ dedupOwnerReferencesTransformer,
+ func(_ context.Context, obj, _ runtime.Object) (runtime.Object, error) {
+ accessor, _ := meta.Accessor(obj)
+ accessor.SetManagedFields(nil)
+ return obj, nil
+ })
+ result, err = requestFunc()
+ }
+ }
+ return result, err
+ })
+ return result, wasCreated, err
+}
+
+// applyPatchToObject applies a strategic merge patch of <patchMap> to
+// <originalMap> and stores the result in <objToUpdate>.
+// NOTE: <objToUpdate> must be a versioned object.
+func applyPatchToObject(
+ defaulter runtime.ObjectDefaulter,
+ originalMap map[string]interface{},
+ patchMap map[string]interface{},
+ objToUpdate runtime.Object,
+ schemaReferenceObj runtime.Object,
+) error {
+ patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj)
+ if err != nil {
+ return interpretStrategicMergePatchError(err)
+ }
+
+ // Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(patchedObjMap, objToUpdate); err != nil {
+ return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
+ field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()),
+ })
+ }
+ // Decoding from JSON to a versioned object would apply defaults, so we do the same here
+ defaulter.Default(objToUpdate)
+
+ return nil
+}
+
+// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code.
+func interpretStrategicMergePatchError(err error) error {
+ switch err {
+ case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat:
+ return errors.NewBadRequest(err.Error())
+ case mergepatch.ErrNoListOfLists, mergepatch.ErrPatchContentNotMatchRetainKeys:
+ return errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false)
+ default:
+ return err
+ }
+}
+
+// patchToUpdateOptions creates an UpdateOptions with the same field values as the provided PatchOptions.
+func patchToUpdateOptions(po *metav1.PatchOptions) *metav1.UpdateOptions {
+ if po == nil {
+ return nil
+ }
+ uo := &metav1.UpdateOptions{
+ DryRun: po.DryRun,
+ FieldManager: po.FieldManager,
+ }
+ uo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
+ return uo
+}
+
+// patchToCreateOptions creates a CreateOptions with the same field values as the provided PatchOptions.
+func patchToCreateOptions(po *metav1.PatchOptions) *metav1.CreateOptions {
+ if po == nil {
+ return nil
+ }
+ co := &metav1.CreateOptions{
+ DryRun: po.DryRun,
+ FieldManager: po.FieldManager,
+ }
+ co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))
+ return co
+}
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go
new file mode 100644
index 0000000000..f9b38e07d3
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + utiltrace "k8s.io/utils/trace" +) + +// transformObject takes the object as returned by storage and ensures it is in +// the client's desired form, as well as ensuring any API level fields like self-link +// are properly set. +func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { + if co, ok := obj.(runtime.CacheableObject); ok { + if mediaType.Convert != nil { + // Non-nil mediaType.Convert means that some conversion of the object + // has to happen. Currently conversion may potentially modify the + // object or assume something about it (e.g. asTable operates on + // reflection, which won't work for any wrapper). + // To ensure it will work correctly, let's operate on base objects + // and not cache it for now. + // + // TODO: Long-term, transformObject should be changed so that it + // implements runtime.Encoder interface. + return doTransformObject(ctx, co.GetObject(), opts, mediaType, scope, req) + } + } + return doTransformObject(ctx, obj, opts, mediaType, scope, req) +} + +func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { + if _, ok := obj.(*metav1.Status); ok { + return obj, nil + } + if err := setObjectSelfLink(ctx, obj, req, scope.Namer); err != nil { + return nil, err + } + + switch target := mediaType.Convert; { + case target == nil: + return obj, nil + + case target.Kind == "PartialObjectMetadata": + return asPartialObjectMetadata(obj, target.GroupVersion()) + + case target.Kind == "PartialObjectMetadataList": + return asPartialObjectMetadataList(obj, target.GroupVersion()) + + case target.Kind == "Table": + options, ok := opts.(*metav1.TableOptions) + if !ok { + return nil, fmt.Errorf("unexpected TableOptions, got %T", opts) + } + return asTable(ctx, obj, options, scope, target.GroupVersion()) + + default: + accepted, _ := negotiation.MediaTypesForSerializer(metainternalversionscheme.Codecs) + err := negotiation.NewNotAcceptableError(accepted) + return nil, err + } +} + +// optionsForTransform will load and validate any additional query parameter options for +// a conversion or return an error. 
+func optionsForTransform(mediaType negotiation.MediaTypeOptions, req *http.Request) (interface{}, error) { + switch target := mediaType.Convert; { + case target == nil: + case target.Kind == "Table" && (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): + opts := &metav1.TableOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, opts); err != nil { + return nil, err + } + switch errs := validation.ValidateTableOptions(opts); len(errs) { + case 0: + return opts, nil + case 1: + return nil, errors.NewBadRequest(fmt.Sprintf("Unable to convert to Table as requested: %v", errs[0].Error())) + default: + return nil, errors.NewBadRequest(fmt.Sprintf("Unable to convert to Table as requested: %v", errs)) + } + } + return nil, nil +} + +// targetEncodingForTransform returns the appropriate serializer for the input media type +func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.MediaTypeOptions, req *http.Request) (schema.GroupVersionKind, runtime.NegotiatedSerializer, bool) { + switch target := mediaType.Convert; { + case target == nil: + case (target.Kind == "PartialObjectMetadata" || target.Kind == "PartialObjectMetadataList" || target.Kind == "Table") && + (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): + return *target, metainternalversionscheme.Codecs, true + } + return scope.Kind, scope.Serializer, false +} + +// transformResponseObject takes an object loaded from storage and performs any necessary transformations. +// Will write the complete response object. +func transformResponseObject(ctx context.Context, scope *RequestScope, trace *utiltrace.Trace, req *http.Request, w http.ResponseWriter, statusCode int, mediaType negotiation.MediaTypeOptions, result runtime.Object) { + options, err := optionsForTransform(mediaType, req) + if err != nil { + scope.err(err, w, req) + return + } + obj, err := transformObject(ctx, result, options, mediaType, scope, req) + if err != nil { + scope.err(err, w, req) + return + } + kind, serializer, _ := targetEncodingForTransform(scope, mediaType, req) + responsewriters.WriteObjectNegotiated(serializer, scope, kind.GroupVersion(), w, req, statusCode, obj) +} + +// errNotAcceptable indicates Accept negotiation has failed +type errNotAcceptable struct { + message string +} + +func newNotAcceptableError(message string) error { + return errNotAcceptable{message} +} + +func (e errNotAcceptable) Error() string { + return e.message +} + +func (e errNotAcceptable) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReason("NotAcceptable"), + Message: e.Error(), + } +} + +func asTable(ctx context.Context, result runtime.Object, opts *metav1.TableOptions, scope *RequestScope, groupVersion schema.GroupVersion) (runtime.Object, error) { + switch groupVersion { + case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion: + default: + return nil, newNotAcceptableError(fmt.Sprintf("no Table exists in group version %s", groupVersion)) + } + + obj, err := scope.TableConvertor.ConvertToTable(ctx, result, opts) + if err != nil { + return nil, err + } + + table := (*metav1.Table)(obj) + + for i := range table.Rows { + item := &table.Rows[i] + switch opts.IncludeObject { + case metav1.IncludeObject: + item.Object.Object, err = scope.Convertor.ConvertToVersion(item.Object.Object, 
scope.Kind.GroupVersion()) + if err != nil { + return nil, err + } + // TODO: rely on defaulting for the value here? + case metav1.IncludeMetadata, "": + m, err := meta.Accessor(item.Object.Object) + if err != nil { + return nil, err + } + // TODO: turn this into an internal type and do conversion in order to get object kind automatically set? + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata")) + item.Object.Object = partial + case metav1.IncludeNone: + item.Object.Object = nil + default: + err = errors.NewBadRequest(fmt.Sprintf("unrecognized includeObject value: %q", opts.IncludeObject)) + return nil, err + } + } + + return table, nil +} + +func asPartialObjectMetadata(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) { + if meta.IsListType(result) { + err := newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadata, but the requested object is a list (%T)", result)) + return nil, err + } + switch groupVersion { + case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion: + default: + return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion)) + } + m, err := meta.Accessor(result) + if err != nil { + return nil, err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata")) + return partial, nil +} + +func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) { + li, ok := result.(metav1.ListInterface) + if !ok { + return nil, newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadataList, but the requested object is not a list (%T)", result)) + } + + gvk := groupVersion.WithKind("PartialObjectMetadata") + switch { + case groupVersion == metav1beta1.SchemeGroupVersion: + list := &metav1beta1.PartialObjectMetadataList{} + err := meta.EachListItem(result, func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + return err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(gvk) + list.Items = append(list.Items, *partial) + return nil + }) + if err != nil { + return nil, err + } + list.SelfLink = li.GetSelfLink() + list.ResourceVersion = li.GetResourceVersion() + list.Continue = li.GetContinue() + return list, nil + + case groupVersion == metav1.SchemeGroupVersion: + list := &metav1.PartialObjectMetadataList{} + err := meta.EachListItem(result, func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + return err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(gvk) + list.Items = append(list.Items, *partial) + return nil + }) + if err != nil { + return nil, err + } + list.SelfLink = li.GetSelfLink() + list.ResourceVersion = li.GetResourceVersion() + list.Continue = li.GetContinue() + return list, nil + + default: + return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion)) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go new file mode 100644 index 0000000000..b76758b793 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package responsewriters contains helpers to write responses in HTTP handlers.
+package responsewriters // import "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go
new file mode 100644
index 0000000000..d13bee4d22
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package responsewriters
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+// Avoid emitting errors that look like valid HTML. Quotes are okay.
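+// For example (illustrative, not part of the original source): a "<script>"
+// fragment in an error message is emitted as "&lt;script&gt;" so it cannot
+// render as markup in a browser.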
+var sanitizer = strings.NewReplacer(`&`, "&amp;", `<`, "&lt;", `>`, "&gt;")
+
+// Forbidden renders a simple forbidden error
+func Forbidden(ctx context.Context, attributes authorizer.Attributes, w http.ResponseWriter, req *http.Request, reason string, s runtime.NegotiatedSerializer) {
+ msg := sanitizer.Replace(forbiddenMessage(attributes))
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+
+ var errMsg string
+ if len(reason) == 0 {
+ errMsg = fmt.Sprintf("%s", msg)
+ } else {
+ errMsg = fmt.Sprintf("%s: %s", msg, reason)
+ }
+ gv := schema.GroupVersion{Group: attributes.GetAPIGroup(), Version: attributes.GetAPIVersion()}
+ gr := schema.GroupResource{Group: attributes.GetAPIGroup(), Resource: attributes.GetResource()}
+ ErrorNegotiated(apierrors.NewForbidden(gr, attributes.GetName(), fmt.Errorf(errMsg)), s, gv, w, req)
+}
+
+func forbiddenMessage(attributes authorizer.Attributes) string {
+ username := ""
+ if user := attributes.GetUser(); user != nil {
+ username = user.GetName()
+ }
+
+ if !attributes.IsResourceRequest() {
+ return fmt.Sprintf("User %q cannot %s path %q", username, attributes.GetVerb(), attributes.GetPath())
+ }
+
+ resource := attributes.GetResource()
+ if subresource := attributes.GetSubresource(); len(subresource) > 0 {
+ resource = resource + "/" + subresource
+ }
+
+ if ns := attributes.GetNamespace(); len(ns) > 0 {
+ return fmt.Sprintf("User %q cannot %s resource %q in API group %q in the namespace %q", username, attributes.GetVerb(), resource, attributes.GetAPIGroup(), ns)
+ }
+
+ return fmt.Sprintf("User %q cannot %s resource %q in API group %q at the cluster scope", username, attributes.GetVerb(), resource, attributes.GetAPIGroup())
+}
+
+// InternalError renders a simple internal error
+func InternalError(w http.ResponseWriter, req *http.Request, err error) {
+ http.Error(w, sanitizer.Replace(fmt.Sprintf("Internal Server Error: %q: %v", req.RequestURI, err)),
+ http.StatusInternalServerError)
+ utilruntime.HandleError(err)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go
new file mode 100644
index 0000000000..5a84543503
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package responsewriters
+
+import (
+ "fmt"
+ "net/http"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apiserver/pkg/storage"
+)
+
+// statusError is an object that can be converted into a metav1.Status
+type statusError interface {
+ Status() metav1.Status
+}
+
+// ErrorToAPIStatus converts an error to a metav1.Status object.
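+// For example (illustrative, not in the original source): an error built with
+// errors.NewNotFound already carries a Status and is reported with Code 404,
+// while a storage error matching storage.IsConflict falls through to the
+// default branch below and is reported with Code 409.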
+func ErrorToAPIStatus(err error) *metav1.Status {
+ switch t := err.(type) {
+ case statusError:
+ status := t.Status()
+ if len(status.Status) == 0 {
+ status.Status = metav1.StatusFailure
+ }
+ switch status.Status {
+ case metav1.StatusSuccess:
+ if status.Code == 0 {
+ status.Code = http.StatusOK
+ }
+ case metav1.StatusFailure:
+ if status.Code == 0 {
+ status.Code = http.StatusInternalServerError
+ }
+ default:
+ runtime.HandleError(fmt.Errorf("apiserver received an error with a wrong status field: %#+v", err))
+ if status.Code == 0 {
+ status.Code = http.StatusInternalServerError
+ }
+ }
+ status.Kind = "Status"
+ status.APIVersion = "v1"
+ //TODO: check for invalid responses
+ return &status
+ default:
+ status := http.StatusInternalServerError
+ switch {
+ //TODO: replace me with NewConflictErr
+ case storage.IsConflict(err):
+ status = http.StatusConflict
+ }
+ // Log errors that were not converted to an error status
+ // by REST storage - these typically indicate programmer
+ // error by not using pkg/api/errors, or unexpected failure
+ // cases.
+ runtime.HandleError(fmt.Errorf("apiserver received an error that is not a metav1.Status: %#+v", err))
+ return &metav1.Status{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Status",
+ APIVersion: "v1",
+ },
+ Status: metav1.StatusFailure,
+ Code: int32(status),
+ Reason: metav1.StatusReasonUnknown,
+ Message: err.Error(),
+ }
+ }
+}
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go
new file mode 100644
index 0000000000..65cb389e51
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go
@@ -0,0 +1,286 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package responsewriters
+
+import (
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "k8s.io/apiserver/pkg/features"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apiserver/pkg/audit"
+ "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
+ "k8s.io/apiserver/pkg/endpoints/metrics"
+ "k8s.io/apiserver/pkg/endpoints/request"
+ "k8s.io/apiserver/pkg/registry/rest"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ "k8s.io/apiserver/pkg/util/flushwriter"
+ "k8s.io/apiserver/pkg/util/wsstream"
+)
+
+// StreamObject performs input stream negotiation from a ResourceStreamer and writes that to the response.
+// If the client requests a websocket upgrade, negotiate for a websocket reader protocol (because many
+// browser clients cannot easily handle binary streaming protocols).
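+// Illustrative note (not part of the original source): a request carrying the
+// websocket upgrade headers is served through wsstream below, while any other
+// request has the stream copied straight to the response body, optionally
+// through a flush-on-write wrapper.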
+func StreamObject(statusCode int, gv schema.GroupVersion, s runtime.NegotiatedSerializer, stream rest.ResourceStreamer, w http.ResponseWriter, req *http.Request) { + out, flush, contentType, err := stream.InputStream(req.Context(), gv.String(), req.Header.Get("Accept")) + if err != nil { + ErrorNegotiated(err, s, gv, w, req) + return + } + if out == nil { + // No output provided - return StatusNoContent + w.WriteHeader(http.StatusNoContent) + return + } + defer out.Close() + + if wsstream.IsWebSocketRequest(req) { + r := wsstream.NewReader(out, true, wsstream.NewDefaultReaderProtocols()) + if err := r.Copy(w, req); err != nil { + utilruntime.HandleError(fmt.Errorf("error encountered while streaming results via websocket: %v", err)) + } + return + } + + if len(contentType) == 0 { + contentType = "application/octet-stream" + } + w.Header().Set("Content-Type", contentType) + w.WriteHeader(statusCode) + // Flush headers, if possible + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + writer := w.(io.Writer) + if flush { + writer = flushwriter.Wrap(w) + } + io.Copy(writer, out) +} + +// SerializeObject renders an object in the content type negotiated by the client using the provided encoder. +// The context is optional and can be nil. This method will perform optional content compression if requested by +// a client and the feature gate for APIResponseCompression is enabled. +func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object) { + w := &deferredResponseWriter{ + mediaType: mediaType, + statusCode: statusCode, + contentEncoding: negotiateContentEncoding(req), + hw: hw, + } + + err := encoder.Encode(object, w) + if err == nil { + err = w.Close() + if err != nil { + // we cannot write an error to the writer anymore as the Encode call was successful. + utilruntime.HandleError(fmt.Errorf("apiserver was unable to close cleanly the response writer: %v", err)) + } + return + } + + // make a best effort to write the object if a failure is detected + utilruntime.HandleError(fmt.Errorf("apiserver was unable to write a JSON response: %v", err)) + status := ErrorToAPIStatus(err) + candidateStatusCode := int(status.Code) + // if the current status code is successful, allow the error's status code to overwrite it + if statusCode >= http.StatusOK && statusCode < http.StatusBadRequest { + w.statusCode = candidateStatusCode + } + output, err := runtime.Encode(encoder, status) + if err != nil { + w.mediaType = "text/plain" + output = []byte(fmt.Sprintf("%s: %s", status.Reason, status.Message)) + } + if _, err := w.Write(output); err != nil { + utilruntime.HandleError(fmt.Errorf("apiserver was unable to write a fallback JSON response: %v", err)) + } + w.Close() +} + +var gzipPool = &sync.Pool{ + New: func() interface{} { + gw, err := gzip.NewWriterLevel(nil, defaultGzipContentEncodingLevel) + if err != nil { + panic(err) + } + return gw + }, +} + +const ( + // defaultGzipContentEncodingLevel is set to 4 which uses less CPU than the default level + defaultGzipContentEncodingLevel = 4 + // defaultGzipThresholdBytes is compared to the size of the first write from the stream + // (usually the entire object), and if the size is smaller no gzipping will be performed + // if the client requests it. + defaultGzipThresholdBytes = 128 * 1024 +) + +// negotiateContentEncoding returns a supported client-requested content encoding for the +// provided request. 
It will return the empty string if no supported content encoding was +// found or if response compression is disabled. +func negotiateContentEncoding(req *http.Request) string { + encoding := req.Header.Get("Accept-Encoding") + if len(encoding) == 0 { + return "" + } + if !utilfeature.DefaultFeatureGate.Enabled(features.APIResponseCompression) { + return "" + } + for len(encoding) > 0 { + var token string + if next := strings.Index(encoding, ","); next != -1 { + token = encoding[:next] + encoding = encoding[next+1:] + } else { + token = encoding + encoding = "" + } + switch strings.TrimSpace(token) { + case "gzip": + return "gzip" + } + } + return "" +} + +type deferredResponseWriter struct { + mediaType string + statusCode int + contentEncoding string + + hasWritten bool + hw http.ResponseWriter + w io.Writer +} + +func (w *deferredResponseWriter) Write(p []byte) (n int, err error) { + if w.hasWritten { + return w.w.Write(p) + } + w.hasWritten = true + + hw := w.hw + header := hw.Header() + switch { + case w.contentEncoding == "gzip" && len(p) > defaultGzipThresholdBytes: + header.Set("Content-Encoding", "gzip") + header.Add("Vary", "Accept-Encoding") + + gw := gzipPool.Get().(*gzip.Writer) + gw.Reset(hw) + + w.w = gw + default: + w.w = hw + } + + header.Set("Content-Type", w.mediaType) + hw.WriteHeader(w.statusCode) + return w.w.Write(p) +} + +func (w *deferredResponseWriter) Close() error { + if !w.hasWritten { + return nil + } + var err error + switch t := w.w.(type) { + case *gzip.Writer: + err = t.Close() + t.Reset(nil) + gzipPool.Put(t) + } + return err +} + +var nopCloser = ioutil.NopCloser(nil) + +// WriteObjectNegotiated renders an object in the content type negotiated by the client. +func WriteObjectNegotiated(s runtime.NegotiatedSerializer, restrictions negotiation.EndpointRestrictions, gv schema.GroupVersion, w http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object) { + stream, ok := object.(rest.ResourceStreamer) + if ok { + requestInfo, _ := request.RequestInfoFrom(req.Context()) + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { + StreamObject(statusCode, gv, s, stream, w, req) + }) + return + } + + _, serializer, err := negotiation.NegotiateOutputMediaType(req, s, restrictions) + if err != nil { + // if original statusCode was not successful we need to return the original error + // we cannot hide it behind negotiation problems + if statusCode < http.StatusOK || statusCode >= http.StatusBadRequest { + WriteRawJSON(int(statusCode), object, w) + return + } + status := ErrorToAPIStatus(err) + WriteRawJSON(int(status.Code), status, w) + return + } + + if ae := request.AuditEventFrom(req.Context()); ae != nil { + audit.LogResponseObject(ae, object, gv, s) + } + + encoder := s.EncoderForVersion(serializer.Serializer, gv) + SerializeObject(serializer.MediaType, encoder, w, req, statusCode, object) +} + +// ErrorNegotiated renders an error to the response. Returns the HTTP status code of the error. +// The context is optional and may be nil. 
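+// Illustrative example (not part of the original source): writing a NotFound
+// error produces a 404 Status body and returns 404; a Status carrying
+// Details.RetryAfterSeconds additionally sets the Retry-After header below.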
+func ErrorNegotiated(err error, s runtime.NegotiatedSerializer, gv schema.GroupVersion, w http.ResponseWriter, req *http.Request) int { + status := ErrorToAPIStatus(err) + code := int(status.Code) + // when writing an error, check to see if the status indicates a retry after period + if status.Details != nil && status.Details.RetryAfterSeconds > 0 { + delay := strconv.Itoa(int(status.Details.RetryAfterSeconds)) + w.Header().Set("Retry-After", delay) + } + + if code == http.StatusNoContent { + w.WriteHeader(code) + return code + } + + WriteObjectNegotiated(s, negotiation.DefaultEndpointRestrictions, gv, w, req, code, status) + return code +} + +// WriteRawJSON writes a non-API object in JSON. +func WriteRawJSON(statusCode int, object interface{}, w http.ResponseWriter) { + output, err := json.MarshalIndent(object, "", " ") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + w.Write(output) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go new file mode 100644 index 0000000000..01396f2716 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -0,0 +1,529 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + goruntime "runtime" + "strings" + "time" + + grpccodes "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/apiserver/pkg/warning" + "k8s.io/klog/v2" +) + +const ( + // DuplicateOwnerReferencesWarningFormat is the warning that a client receives when a create/update request contains + // duplicate owner reference entries. + DuplicateOwnerReferencesWarningFormat = ".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v" + // DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat indicates the duplication was observed + // after mutating admission. 
+ // NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission. + // For PATCH request the API server only dedups after mutating admission. + DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = ".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v" +) + +// RequestScope encapsulates common fields across all RESTful handler methods. +type RequestScope struct { + Namer ScopeNamer + + Serializer runtime.NegotiatedSerializer + runtime.ParameterCodec + + // StandardSerializers, if set, restricts which serializers can be used when + // we aren't transforming the output (into Table or PartialObjectMetadata). + // Used only by CRDs which do not yet support Protobuf. + StandardSerializers []runtime.SerializerInfo + + Creater runtime.ObjectCreater + Convertor runtime.ObjectConvertor + Defaulter runtime.ObjectDefaulter + Typer runtime.ObjectTyper + UnsafeConvertor runtime.ObjectConvertor + Authorizer authorizer.Authorizer + + EquivalentResourceMapper runtime.EquivalentResourceMapper + + TableConvertor rest.TableConvertor + FieldManager *fieldmanager.FieldManager + + Resource schema.GroupVersionResource + Kind schema.GroupVersionKind + Subresource string + + MetaGroupVersion schema.GroupVersion + + // HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling. + HubGroupVersion schema.GroupVersion + + MaxRequestBodyBytes int64 +} + +func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) { + responsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req) +} + +func (scope *RequestScope) AllowsMediaTypeTransform(mimeType, mimeSubType string, gvk *schema.GroupVersionKind) bool { + // some handlers like CRDs can't serve all the mime types that PartialObjectMetadata or Table can - if + // gvk is nil (no conversion) allow StandardSerializers to further restrict the set of mime types. 
+ if gvk == nil { + if len(scope.StandardSerializers) == 0 { + return true + } + for _, info := range scope.StandardSerializers { + if info.MediaTypeType == mimeType && info.MediaTypeSubType == mimeSubType { + return true + } + } + return false + } + + // TODO: this is temporary, replace with an abstraction calculated at endpoint installation time + if gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion { + switch gvk.Kind { + case "Table": + return scope.TableConvertor != nil && + mimeType == "application" && + (mimeSubType == "json" || mimeSubType == "yaml") + case "PartialObjectMetadata", "PartialObjectMetadataList": + // TODO: should delineate between lists and non-list endpoints + return true + default: + return false + } + } + return false +} + +func (scope *RequestScope) AllowsServerVersion(version string) bool { + return version == scope.MetaGroupVersion.Version +} + +func (scope *RequestScope) AllowsStreamSchema(s string) bool { + return s == "watch" +} + +var _ admission.ObjectInterfaces = &RequestScope{} + +func (r *RequestScope) GetObjectCreater() runtime.ObjectCreater { return r.Creater } +func (r *RequestScope) GetObjectTyper() runtime.ObjectTyper { return r.Typer } +func (r *RequestScope) GetObjectDefaulter() runtime.ObjectDefaulter { return r.Defaulter } +func (r *RequestScope) GetObjectConvertor() runtime.ObjectConvertor { return r.Convertor } +func (r *RequestScope) GetEquivalentResourceMapper() runtime.EquivalentResourceMapper { + return r.EquivalentResourceMapper +} + +// ConnectResource returns a function that handles a connect request on a rest.Storage object. +func ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + if isDryRun(req.URL) { + scope.err(errors.NewBadRequest("dryRun is not supported"), w, req) + return + } + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx := req.Context() + ctx = request.WithNamespace(ctx, namespace) + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + + opts, subpath, subpathKey := connecter.NewConnectOptions() + if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if admit != nil && admit.Handles(admission.Connect) { + userInfo, _ := request.UserFrom(ctx) + // TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { + err = mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope) + if err != nil { + scope.err(err, w, req) + return + } + } + if validatingAdmission, ok := admit.(admission.ValidationInterface); ok { + err = validatingAdmission.Validate(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope) + if err != nil { + scope.err(err, w, req) + return + } + } + } + requestInfo, _ := request.RequestInfoFrom(ctx) + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { + handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w}) 
+ if err != nil { + scope.err(err, w, req) + return + } + handler.ServeHTTP(w, req) + }) + } +} + +// responder implements rest.Responder for assisting a connector in writing objects or errors. +type responder struct { + scope *RequestScope + req *http.Request + w http.ResponseWriter +} + +func (r *responder) Object(statusCode int, obj runtime.Object) { + responsewriters.WriteObjectNegotiated(r.scope.Serializer, r.scope, r.scope.Kind.GroupVersion(), r.w, r.req, statusCode, obj) +} + +func (r *responder) Error(err error) { + r.scope.err(err, r.w, r.req) +} + +// resultFunc is a function that returns a rest result and can be run in a goroutine +type resultFunc func() (runtime.Object, error) + +// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response. +// An api.Status object with status != success is considered an "error", which interrupts the normal response flow. +func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) { + // these channels need to be buffered to prevent the goroutine below from hanging indefinitely + // when the select statement reads something other than the one the goroutine sends on. + ch := make(chan runtime.Object, 1) + errCh := make(chan error, 1) + panicCh := make(chan interface{}, 1) + go func() { + // panics don't cross goroutine boundaries, so we have to handle ourselves + defer func() { + panicReason := recover() + if panicReason != nil { + // do not wrap the sentinel ErrAbortHandler panic value + if panicReason != http.ErrAbortHandler { + // Same as stdlib http server code. Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:goruntime.Stack(buf, false)] + panicReason = fmt.Sprintf("%v\n%s", panicReason, buf) + } + // Propagate to parent goroutine + panicCh <- panicReason + } + }() + + if result, err := fn(); err != nil { + errCh <- err + } else { + ch <- result + } + }() + + select { + case result = <-ch: + if status, ok := result.(*metav1.Status); ok { + if status.Status != metav1.StatusSuccess { + return nil, errors.FromObject(status) + } + } + return result, nil + case err = <-errCh: + return nil, err + case p := <-panicCh: + panic(p) + case <-time.After(timeout): + return nil, errors.NewTimeoutError(fmt.Sprintf("request did not complete within requested timeout %s", timeout), 0) + } +} + +// transformDecodeError adds additional information into a bad-request api error when a decode fails. +func transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error { + objGVKs, _, err := typer.ObjectKinds(into) + if err != nil { + return errors.NewBadRequest(err.Error()) + } + objGVK := objGVKs[0] + if gvk != nil && len(gvk.Kind) > 0 { + return errors.NewBadRequest(fmt.Sprintf("%s in version %q cannot be handled as a %s: %v", gvk.Kind, gvk.Version, objGVK.Kind, baseErr)) + } + summary := summarizeData(body, 30) + return errors.NewBadRequest(fmt.Sprintf("the object provided is unrecognized (must be of type %s): %v (%s)", objGVK.Kind, baseErr, summary)) +} + +// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request +// plus the path and query generated by the provided linkFunc +func setSelfLink(obj runtime.Object, requestInfo *request.RequestInfo, namer ScopeNamer) error { + // TODO: SelfLink generation should return a full URL? 
+ uri, err := namer.GenerateLink(requestInfo, obj) + if err != nil { + return nil + } + + return namer.SetSelfLink(obj, uri) +} + +func hasUID(obj runtime.Object) (bool, error) { + if obj == nil { + return false, nil + } + accessor, err := meta.Accessor(obj) + if err != nil { + return false, errors.NewInternalError(err) + } + if len(accessor.GetUID()) == 0 { + return false, nil + } + return true, nil +} + +// checkName checks the provided name against the request +func checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error { + objNamespace, objName, err := namer.ObjectName(obj) + if err != nil { + return errors.NewBadRequest(fmt.Sprintf( + "the name of the object (%s based on URL) was undeterminable: %v", name, err)) + } + if objName != name { + return errors.NewBadRequest(fmt.Sprintf( + "the name of the object (%s) does not match the name on the URL (%s)", objName, name)) + } + if len(namespace) > 0 { + if len(objNamespace) > 0 && objNamespace != namespace { + return errors.NewBadRequest(fmt.Sprintf( + "the namespace of the object (%s) does not match the namespace on the request (%s)", objNamespace, namespace)) + } + } + + return nil +} + +// dedupOwnerReferences dedups owner references over the entire entry. +// NOTE: We don't know enough about the existing cases of owner references +// sharing the same UID but different fields. Nor do we know what might break. +// In the future we may just dedup/reject owner references with the same UID. +func dedupOwnerReferences(refs []metav1.OwnerReference) ([]metav1.OwnerReference, []string) { + var result []metav1.OwnerReference + var duplicates []string + seen := make(map[types.UID]struct{}) + for _, ref := range refs { + _, ok := seen[ref.UID] + // Short-circuit if we haven't seen the UID before. Otherwise + // check the entire list we have so far. + if !ok || !hasOwnerReference(result, ref) { + seen[ref.UID] = struct{}{} + result = append(result, ref) + } else { + duplicates = append(duplicates, string(ref.UID)) + } + } + return result, duplicates +} + +// hasOwnerReference returns true if refs has an item equal to ref. The function +// focuses on semantic equality instead of memory equality, to catch duplicates +// with different pointer addresses. The function uses apiequality.Semantic +// instead of implementing its own comparison, to tolerate API changes to +// metav1.OwnerReference. +// NOTE: This is expensive, but we accept it because we've made sure it only +// happens to owner references containing duplicate UIDs, plus typically the +// number of items in the list should be small. +func hasOwnerReference(refs []metav1.OwnerReference, ref metav1.OwnerReference) bool { + for _, r := range refs { + if apiequality.Semantic.DeepEqual(r, ref) { + return true + } + } + return false +} + +// dedupOwnerReferencesAndAddWarning dedups owner references in the object metadata. +// If duplicates are found, the function records a warning to the provided context. +func dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext context.Context, afterMutatingAdmission bool) { + accessor, err := meta.Accessor(obj) + if err != nil { + // The object doesn't have metadata. Nothing we need to do here. + return + } + refs := accessor.GetOwnerReferences() + deduped, duplicates := dedupOwnerReferences(refs) + if len(duplicates) > 0 { + // NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission. + // For PATCH request the API server only dedups after mutating admission. 
+ format := DuplicateOwnerReferencesWarningFormat
+ if afterMutatingAdmission {
+ format = DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat
+ }
+ warning.AddWarning(requestContext, "", fmt.Sprintf(format,
+ strings.Join(duplicates, ", ")))
+ accessor.SetOwnerReferences(deduped)
+ }
+}
+
+// setObjectSelfLink sets the self link of an object as needed.
+// TODO: remove the need for the namer LinkSetters by requiring objects implement either Object or List
+// interfaces
+func setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error {
+ if utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) {
+ // Ensure that for empty lists we don't return items.
+ if meta.IsListType(obj) && meta.LenList(obj) == 0 {
+ if err := meta.SetList(obj, []runtime.Object{}); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // We only generate list links on objects that implement ListInterface - historically we duck typed this
+ // check via reflection, but as we move away from reflection we require that you not only carry Items but
+ // ListMeta in order to be identified as a list.
+ if !meta.IsListType(obj) {
+ requestInfo, ok := request.RequestInfoFrom(ctx)
+ if !ok {
+ return fmt.Errorf("missing requestInfo")
+ }
+ return setSelfLink(obj, requestInfo, namer)
+ }
+
+ uri, err := namer.GenerateListLink(req)
+ if err != nil {
+ return err
+ }
+ if err := namer.SetSelfLink(obj, uri); err != nil {
+ klog.V(4).Infof("Unable to set self link on object: %v", err)
+ }
+ requestInfo, ok := request.RequestInfoFrom(ctx)
+ if !ok {
+ return fmt.Errorf("missing requestInfo")
+ }
+
+ count := 0
+ err = meta.EachListItem(obj, func(obj runtime.Object) error {
+ count++
+ return setSelfLink(obj, requestInfo, namer)
+ })
+
+ if count == 0 {
+ if err := meta.SetList(obj, []runtime.Object{}); err != nil {
+ return err
+ }
+ }
+
+ return err
+}
+
+func summarizeData(data []byte, maxLength int) string {
+ switch {
+ case len(data) == 0:
+ return ""
+ case data[0] == '{':
+ if len(data) > maxLength {
+ return string(data[:maxLength]) + " ..."
+ }
+ return string(data)
+ default:
+ if len(data) > maxLength {
+ return hex.EncodeToString(data[:maxLength]) + " ..."
+ }
+ return hex.EncodeToString(data)
+ }
+}
+
+func limitedReadBody(req *http.Request, limit int64) ([]byte, error) {
+ defer req.Body.Close()
+ if limit <= 0 {
+ return ioutil.ReadAll(req.Body)
+ }
+ lr := &io.LimitedReader{
+ R: req.Body,
+ N: limit + 1,
+ }
+ data, err := ioutil.ReadAll(lr)
+ if err != nil {
+ return nil, err
+ }
+ if lr.N <= 0 {
+ return nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf("limit is %d", limit))
+ }
+ return data, nil
+}
+
+func parseTimeout(str string) time.Duration {
+ if str != "" {
+ timeout, err := time.ParseDuration(str)
+ if err == nil {
+ return timeout
+ }
+ klog.Errorf("Failed to parse %q: %v", str, err)
+ }
+ // 34 was chosen as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout. Everyone chooses 30.
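+ // Illustrative behaviour (not in the original source): parseTimeout("30s")
+ // returns 30 * time.Second, while an empty or malformed value falls through
+ // to the 34-second default returned below.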
+ return 34 * time.Second +} + +func isDryRun(url *url.URL) bool { + return len(url.Query()["dryRun"]) != 0 +} + +type etcdError interface { + Code() grpccodes.Code + Error() string +} + +type grpcError interface { + GRPCStatus() *grpcstatus.Status +} + +func isTooLargeError(err error) bool { + if err != nil { + if etcdErr, ok := err.(etcdError); ok { + if etcdErr.Code() == grpccodes.InvalidArgument && etcdErr.Error() == "etcdserver: request is too large" { + return true + } + } + if grpcErr, ok := err.(grpcError); ok { + if grpcErr.GRPCStatus().Code() == grpccodes.ResourceExhausted && strings.Contains(grpcErr.GRPCStatus().Message(), "trying to send message larger than max") { + return true + } + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go new file mode 100644 index 0000000000..f0473323f9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -0,0 +1,269 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +// UpdateResource returns a function that will handle a resource update +func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. 
+ trace := utiltrace.New("Update", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.UpdateOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if errs := validation.ValidateUpdateOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "UpdateOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions")) + + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + defaultGVK := scope.Kind + original := r.New() + + trace.Step("About to convert to expected version") + decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) + obj, gvk, err := decoder.Decode(body, &defaultGVK, original) + if err != nil { + err = transformDecodeError(scope.Typer, err, original, gvk, body) + scope.err(err, w, req) + return + } + if gvk.GroupVersion() != defaultGVK.GroupVersion() { + err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", gvk.GroupVersion(), defaultGVK.GroupVersion())) + scope.err(err, w, req) + return + } + trace.Step("Conversion done") + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + admit = admission.WithAudit(admit, ae) + + if err := checkName(obj, name, namespace, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + + userInfo, _ := request.UserFrom(ctx) + transformers := []rest.TransformFunc{} + + // allows skipping managedFields update if the resulting object is too big + shouldUpdateManagedFields := true + if scope.FieldManager != nil { + transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) { + if shouldUpdateManagedFields { + return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil + } + return newObj, nil + }) + } + + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { + transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) { + isNotZeroObject, err := 
hasUID(oldObj)
+			if err != nil {
+				return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error())
+			} else if !isNotZeroObject {
+				if mutatingAdmission.Handles(admission.Create) {
+					return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope)
+				}
+			} else {
+				if mutatingAdmission.Handles(admission.Update) {
+					return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope)
+				}
+			}
+			return newObj, nil
+		})
+		transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
+			// Dedup owner references again after mutating admission happens
+			dedupOwnerReferencesAndAddWarning(newObj, req.Context(), true)
+			return newObj, nil
+		})
+	}
+
+	createAuthorizerAttributes := authorizer.AttributesRecord{
+		User:            userInfo,
+		ResourceRequest: true,
+		Path:            req.URL.Path,
+		Verb:            "create",
+		APIGroup:        scope.Resource.Group,
+		APIVersion:      scope.Resource.Version,
+		Resource:        scope.Resource.Resource,
+		Subresource:     scope.Subresource,
+		Namespace:       namespace,
+		Name:            name,
+	}
+
+	trace.Step("About to store object in database")
+	wasCreated := false
+	requestFunc := func() (runtime.Object, error) {
+		obj, created, err := r.Update(
+			ctx,
+			name,
+			rest.DefaultUpdatedObjectInfo(obj, transformers...),
+			withAuthorization(rest.AdmissionToValidateObjectFunc(
+				admit,
+				admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope),
+				scope.Authorizer, createAuthorizerAttributes),
+			rest.AdmissionToValidateObjectUpdateFunc(
+				admit,
+				admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope),
+			false,
+			options,
+		)
+		wasCreated = created
+		return obj, err
+	}
+	// Dedup owner references before updating managed fields
+	dedupOwnerReferencesAndAddWarning(obj, req.Context(), false)
+	result, err := finishRequest(timeout, func() (runtime.Object, error) {
+		result, err := requestFunc()
+		// If the object wasn't committed to storage because its serialized size was too large,
+		// it is safe to remove managedFields (which can be large) and try again.
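+		// The retry reuses the same decoded object: its managedFields are cleared and
+		// shouldUpdateManagedFields is switched off so the transformer registered above
+		// does not re-apply managed fields on the second attempt.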
+ if isTooLargeError(err) && scope.FieldManager != nil { + if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil { + accessor.SetManagedFields(nil) + shouldUpdateManagedFields = false + result, err = requestFunc() + } + } + return result, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + status := http.StatusOK + if wasCreated { + status = http.StatusCreated + } + + transformResponseObject(ctx, scope, trace, req, w, status, outputMediaType, result) + } +} + +func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer, attributes authorizer.Attributes) rest.ValidateObjectFunc { + var once sync.Once + var authorizerDecision authorizer.Decision + var authorizerReason string + var authorizerErr error + return func(ctx context.Context, obj runtime.Object) error { + if a == nil { + return errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize a create on update")) + } + once.Do(func() { + authorizerDecision, authorizerReason, authorizerErr = a.Authorize(ctx, attributes) + }) + // an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here. + if authorizerDecision == authorizer.DecisionAllow { + // Continue to validating admission + return validate(ctx, obj) + } + if authorizerErr != nil { + return errors.NewInternalError(authorizerErr) + } + + // The user is not authorized to perform this action, so we need to build the error response + gr := schema.GroupResource{ + Group: attributes.GetAPIGroup(), + Resource: attributes.GetResource(), + } + name := attributes.GetName() + err := fmt.Errorf("%v", authorizerReason) + return errors.NewForbidden(gr, name, err) + } +} + +// updateToCreateOptions creates a CreateOptions with the same field values as the provided UpdateOptions. +func updateToCreateOptions(uo *metav1.UpdateOptions) *metav1.CreateOptions { + if uo == nil { + return nil + } + co := &metav1.CreateOptions{ + DryRun: uo.DryRun, + FieldManager: uo.FieldManager, + } + co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) + return co +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go new file mode 100644 index 0000000000..22945ccf6b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -0,0 +1,337 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package handlers
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"reflect"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
+	"k8s.io/apiserver/pkg/endpoints/metrics"
+	"k8s.io/apiserver/pkg/server/httplog"
+	"k8s.io/apiserver/pkg/util/wsstream"
+
+	"golang.org/x/net/websocket"
+)
+
+// nothing will ever be sent down this channel
+var neverExitWatch <-chan time.Time = make(chan time.Time)
+
+// TimeoutFactory abstracts watch timeout logic for testing
+type TimeoutFactory interface {
+	TimeoutCh() (<-chan time.Time, func() bool)
+}
+
+// realTimeoutFactory implements TimeoutFactory
+type realTimeoutFactory struct {
+	timeout time.Duration
+}
+
+// TimeoutCh returns a channel which will receive something when the watch times out,
+// and a cleanup function to call when this happens.
+func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) {
+	if w.timeout == 0 {
+		return neverExitWatch, func() bool { return false }
+	}
+	t := time.NewTimer(w.timeout)
+	return t.C, t.Stop
+}
+
+// serveWatch will serve a watch response.
+// TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled.
+func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) {
+	defer watcher.Stop()
+
+	options, err := optionsForTransform(mediaTypeOptions, req)
+	if err != nil {
+		scope.err(err, w, req)
+		return
+	}
+
+	// negotiate for the stream serializer from the scope's serializer
+	serializer, err := negotiation.NegotiateOutputMediaTypeStream(req, scope.Serializer, scope)
+	if err != nil {
+		scope.err(err, w, req)
+		return
+	}
+	framer := serializer.StreamSerializer.Framer
+	streamSerializer := serializer.StreamSerializer.Serializer
+	encoder := scope.Serializer.EncoderForVersion(streamSerializer, scope.Kind.GroupVersion())
+	useTextFraming := serializer.EncodesAsText
+	if framer == nil {
+		scope.err(fmt.Errorf("no framer defined for %q available for embedded encoding", serializer.MediaType), w, req)
+		return
+	}
+	// TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here
+	mediaType := serializer.MediaType
+	if mediaType != runtime.ContentTypeJSON {
+		mediaType += ";stream=watch"
+	}
+
+	// locate the appropriate embedded encoder based on the transform
+	var embeddedEncoder runtime.Encoder
+	contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req)
+	if transform {
+		info, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType)
+		if !ok {
+			scope.err(fmt.Errorf("no encoder for %q exists in the requested target %#v", serializer.MediaType, contentSerializer), w, req)
+			return
+		}
+		embeddedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion())
+	} else {
+		embeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion())
+	}
+
+	ctx := req.Context()
+
+	server := &WatchServer{
+		Watching: watcher,
+		Scope:    scope,
+
+		UseTextFraming:  useTextFraming,
+		MediaType:       mediaType,
+		Framer:          framer,
+		Encoder:         encoder,
+		EmbeddedEncoder: embeddedEncoder,
+
+		Fixup: func(obj runtime.Object)
runtime.Object { + result, err := transformObject(ctx, obj, options, mediaTypeOptions, scope, req) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err)) + return obj + } + // When we are transformed to a table, use the table options as the state for whether we + // should print headers - on watch, we only want to print table headers on the first object + // and omit them on subsequent events. + if tableOptions, ok := options.(*metav1.TableOptions); ok { + tableOptions.NoHeaders = true + } + return result + }, + + TimeoutFactory: &realTimeoutFactory{timeout}, + } + + server.ServeHTTP(w, req) +} + +// WatchServer serves a watch.Interface over a websocket or vanilla HTTP. +type WatchServer struct { + Watching watch.Interface + Scope *RequestScope + + // true if websocket messages should use text framing (as opposed to binary framing) + UseTextFraming bool + // the media type this watch is being served with + MediaType string + // used to frame the watch stream + Framer runtime.Framer + // used to encode the watch stream event itself + Encoder runtime.Encoder + // used to encode the nested object in the watch stream + EmbeddedEncoder runtime.Encoder + // used to correct the object before we send it to the serializer + Fixup func(runtime.Object) runtime.Object + + TimeoutFactory TimeoutFactory +} + +// ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked +// or over a websocket connection. +func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + kind := s.Scope.Kind + metrics.RegisteredWatchers.WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() + defer metrics.RegisteredWatchers.WithLabelValues(kind.Group, kind.Version, kind.Kind).Dec() + + w = httplog.Unlogged(req, w) + + if wsstream.IsWebSocketRequest(req) { + w.Header().Set("Content-Type", s.MediaType) + websocket.Handler(s.HandleWS).ServeHTTP(w, req) + return + } + + flusher, ok := w.(http.Flusher) + if !ok { + err := fmt.Errorf("unable to start watch - can't get http.Flusher: %#v", w) + utilruntime.HandleError(err) + s.Scope.err(errors.NewInternalError(err), w, req) + return + } + + framer := s.Framer.NewFrameWriter(w) + if framer == nil { + // programmer error + err := fmt.Errorf("no stream framing support is available for media type %q", s.MediaType) + utilruntime.HandleError(err) + s.Scope.err(errors.NewBadRequest(err.Error()), w, req) + return + } + e := streaming.NewEncoder(framer, s.Encoder) + + // ensure the connection times out + timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh() + defer cleanup() + + // begin the stream + w.Header().Set("Content-Type", s.MediaType) + w.Header().Set("Transfer-Encoding", "chunked") + w.WriteHeader(http.StatusOK) + flusher.Flush() + + var unknown runtime.Unknown + internalEvent := &metav1.InternalEvent{} + outEvent := &metav1.WatchEvent{} + buf := &bytes.Buffer{} + ch := s.Watching.ResultChan() + done := req.Context().Done() + + for { + select { + case <-done: + return + case <-timeoutCh: + return + case event, ok := <-ch: + if !ok { + // End of results. 
+ return + } + metrics.WatchEvents.WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() + + obj := s.Fixup(event.Object) + if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil { + // unexpected error + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) + return + } + + // ContentType is not required here because we are defaulting to the serializer + // type + unknown.Raw = buf.Bytes() + event.Object = &unknown + metrics.WatchEventsSizes.WithLabelValues(kind.Group, kind.Version, kind.Kind).Observe(float64(len(unknown.Raw))) + + *outEvent = metav1.WatchEvent{} + + // create the external type directly and encode it. Clients will only recognize the serialization we provide. + // The internal event is being reused, not reallocated so its just a few extra assignments to do it this way + // and we get the benefit of using conversion functions which already have to stay in sync + *internalEvent = metav1.InternalEvent(event) + err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err)) + // client disconnect. + return + } + if err := e.Encode(outEvent); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e)) + // client disconnect. + return + } + if len(ch) == 0 { + flusher.Flush() + } + + buf.Reset() + } + } +} + +// HandleWS implements a websocket handler. +func (s *WatchServer) HandleWS(ws *websocket.Conn) { + defer ws.Close() + done := make(chan struct{}) + + go func() { + defer utilruntime.HandleCrash() + // This blocks until the connection is closed. + // Client should not send anything. + wsstream.IgnoreReceives(ws, 0) + // Once the client closes, we should also close + close(done) + }() + + var unknown runtime.Unknown + internalEvent := &metav1.InternalEvent{} + buf := &bytes.Buffer{} + streamBuf := &bytes.Buffer{} + ch := s.Watching.ResultChan() + + for { + select { + case <-done: + return + case event, ok := <-ch: + if !ok { + // End of results. + return + } + obj := s.Fixup(event.Object) + if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil { + // unexpected error + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) + return + } + + // ContentType is not required here because we are defaulting to the serializer + // type + unknown.Raw = buf.Bytes() + event.Object = &unknown + + // the internal event will be versioned by the encoder + // create the external type directly and encode it. Clients will only recognize the serialization we provide. + // The internal event is being reused, not reallocated so its just a few extra assignments to do it this way + // and we get the benefit of using conversion functions which already have to stay in sync + outEvent := &metav1.WatchEvent{} + *internalEvent = metav1.InternalEvent(event) + err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err)) + // client disconnect. + return + } + if err := s.Encoder.Encode(outEvent, streamBuf); err != nil { + // encoding error + utilruntime.HandleError(fmt.Errorf("unable to encode event: %v", err)) + return + } + if s.UseTextFraming { + if err := websocket.Message.Send(ws, streamBuf.String()); err != nil { + // Client disconnect. 
+ return + } + } else { + if err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil { + // Client disconnect. + return + } + } + buf.Reset() + streamBuf.Reset() + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go new file mode 100644 index 0000000000..6549771ced --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -0,0 +1,1246 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "fmt" + "net/http" + gpath "path" + "reflect" + "sort" + "strings" + "time" + "unicode" + + restful "github.com/emicklei/go-restful" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/endpoints/deprecation" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/handlers" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/metrics" + utilwarning "k8s.io/apiserver/pkg/endpoints/warning" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storageversion" + utilfeature "k8s.io/apiserver/pkg/util/feature" + versioninfo "k8s.io/component-base/version" +) + +const ( + ROUTE_META_GVK = "x-kubernetes-group-version-kind" + ROUTE_META_ACTION = "x-kubernetes-action" +) + +type APIInstaller struct { + group *APIGroupVersion + prefix string // Path prefix where API resources are to be registered. + minRequestTimeout time.Duration +} + +// Struct capturing information about an action ("GET", "POST", "WATCH", "PROXY", etc). +type action struct { + Verb string // Verb identifying the action ("GET", "POST", "WATCH", "PROXY", etc). + Path string // The path of the action + Params []*restful.Parameter // List of parameters associated with the action. + Namer handlers.ScopeNamer + AllNamespaces bool // true iff the action is namespaced but works on aggregate result for all namespaces +} + +// An interface to see if one storage supports override its default verb for monitoring +type StorageMetricsOverride interface { + // OverrideMetricsVerb gives a storage object an opportunity to override the verb reported to the metrics endpoint + OverrideMetricsVerb(oldVerb string) (newVerb string) +} + +// An interface to see if an object supports swagger documentation as a method +type documentable interface { + SwaggerDoc() map[string]string +} + +// toDiscoveryKubeVerb maps an action.Verb to the logical kube verb, used for discovery +var toDiscoveryKubeVerb = map[string]string{ + "CONNECT": "", // do not list in discovery. 
+ "DELETE": "delete", + "DELETECOLLECTION": "deletecollection", + "GET": "get", + "LIST": "list", + "PATCH": "patch", + "POST": "create", + "PROXY": "proxy", + "PUT": "update", + "WATCH": "watch", + "WATCHLIST": "watch", +} + +// Install handlers for API resources. +func (a *APIInstaller) Install() ([]metav1.APIResource, []*storageversion.ResourceInfo, *restful.WebService, []error) { + var apiResources []metav1.APIResource + var resourceInfos []*storageversion.ResourceInfo + var errors []error + ws := a.newWebService() + + // Register the paths in a deterministic (sorted) order to get a deterministic swagger spec. + paths := make([]string, len(a.group.Storage)) + var i int = 0 + for path := range a.group.Storage { + paths[i] = path + i++ + } + sort.Strings(paths) + for _, path := range paths { + apiResource, resourceInfo, err := a.registerResourceHandlers(path, a.group.Storage[path], ws) + if err != nil { + errors = append(errors, fmt.Errorf("error in registering resource: %s, %v", path, err)) + } + if apiResource != nil { + apiResources = append(apiResources, *apiResource) + } + if resourceInfo != nil { + resourceInfos = append(resourceInfos, resourceInfo) + } + } + return apiResources, resourceInfos, ws, errors +} + +// newWebService creates a new restful webservice with the api installer's prefix and version. +func (a *APIInstaller) newWebService() *restful.WebService { + ws := new(restful.WebService) + ws.Path(a.prefix) + // a.prefix contains "prefix/group/version" + ws.Doc("API at " + a.prefix) + // Backwards compatibility, we accepted objects with empty content-type at V1. + // If we stop using go-restful, we can default empty content-type to application/json on an + // endpoint by endpoint basis + ws.Consumes("*/*") + mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer) + ws.Produces(append(mediaTypes, streamMediaTypes...)...) + ws.ApiVersion(a.group.GroupVersion.String()) + + return ws +} + +// calculate the storage gvk, the gvk objects are converted to before persisted to the etcd. +func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) { + object := storage.New() + fqKinds, _, err := typer.ObjectKinds(object) + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds) + if !ok { + return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object)) + } + return gvk, nil +} + +// GetResourceKind returns the external group version kind registered for the given storage +// object. If the storage object is a subresource and has an override supplied for it, it returns +// the group version kind supplied in the override. +func GetResourceKind(groupVersion schema.GroupVersion, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) { + // Let the storage tell us exactly what GVK it has + if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { + return gvkProvider.GroupVersionKind(groupVersion), nil + } + + object := storage.New() + fqKinds, _, err := typer.ObjectKinds(object) + if err != nil { + return schema.GroupVersionKind{}, err + } + + // a given go type can have multiple potential fully qualified kinds. 
Find the one that corresponds with the group + // we're trying to register here + fqKindToRegister := schema.GroupVersionKind{} + for _, fqKind := range fqKinds { + if fqKind.Group == groupVersion.Group { + fqKindToRegister = groupVersion.WithKind(fqKind.Kind) + break + } + } + if fqKindToRegister.Empty() { + return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, groupVersion) + } + + // group is guaranteed to match based on the check above + return fqKindToRegister, nil +} + +func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storage, ws *restful.WebService) (*metav1.APIResource, *storageversion.ResourceInfo, error) { + admit := a.group.Admit + + optionsExternalVersion := a.group.GroupVersion + if a.group.OptionsExternalVersion != nil { + optionsExternalVersion = *a.group.OptionsExternalVersion + } + + resource, subresource, err := splitSubresource(path) + if err != nil { + return nil, nil, err + } + + group, version := a.group.GroupVersion.Group, a.group.GroupVersion.Version + + fqKindToRegister, err := GetResourceKind(a.group.GroupVersion, storage, a.group.Typer) + if err != nil { + return nil, nil, err + } + + versionedPtr, err := a.group.Creater.New(fqKindToRegister) + if err != nil { + return nil, nil, err + } + defaultVersionedObject := indirectArbitraryPointer(versionedPtr) + kind := fqKindToRegister.Kind + isSubresource := len(subresource) > 0 + + // If there is a subresource, namespace scoping is defined by the parent resource + namespaceScoped := true + if isSubresource { + parentStorage, ok := a.group.Storage[resource] + if !ok { + return nil, nil, fmt.Errorf("missing parent storage: %q", resource) + } + scoper, ok := parentStorage.(rest.Scoper) + if !ok { + return nil, nil, fmt.Errorf("%q must implement scoper", resource) + } + namespaceScoped = scoper.NamespaceScoped() + + } else { + scoper, ok := storage.(rest.Scoper) + if !ok { + return nil, nil, fmt.Errorf("%q must implement scoper", resource) + } + namespaceScoped = scoper.NamespaceScoped() + } + + // what verbs are supported by the storage, used to know what verbs we support per path + creater, isCreater := storage.(rest.Creater) + namedCreater, isNamedCreater := storage.(rest.NamedCreater) + lister, isLister := storage.(rest.Lister) + getter, isGetter := storage.(rest.Getter) + getterWithOptions, isGetterWithOptions := storage.(rest.GetterWithOptions) + gracefulDeleter, isGracefulDeleter := storage.(rest.GracefulDeleter) + collectionDeleter, isCollectionDeleter := storage.(rest.CollectionDeleter) + updater, isUpdater := storage.(rest.Updater) + patcher, isPatcher := storage.(rest.Patcher) + watcher, isWatcher := storage.(rest.Watcher) + connecter, isConnecter := storage.(rest.Connecter) + storageMeta, isMetadata := storage.(rest.StorageMetadata) + storageVersionProvider, isStorageVersionProvider := storage.(rest.StorageVersionProvider) + if !isMetadata { + storageMeta = defaultStorageMetadata{} + } + exporter, isExporter := storage.(rest.Exporter) + if !isExporter { + exporter = nil + } + + versionedExportOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ExportOptions")) + if err != nil { + return nil, nil, err + } + + if isNamedCreater { + isCreater = true + } + + var versionedList interface{} + if isLister { + list := lister.NewList() + listGVKs, _, err := a.group.Typer.ObjectKinds(list) + if err != nil { + return nil, nil, err + } + versionedListPtr, err := 
a.group.Creater.New(a.group.GroupVersion.WithKind(listGVKs[0].Kind)) + if err != nil { + return nil, nil, err + } + versionedList = indirectArbitraryPointer(versionedListPtr) + } + + versionedListOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ListOptions")) + if err != nil { + return nil, nil, err + } + versionedCreateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("CreateOptions")) + if err != nil { + return nil, nil, err + } + versionedPatchOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("PatchOptions")) + if err != nil { + return nil, nil, err + } + versionedUpdateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("UpdateOptions")) + if err != nil { + return nil, nil, err + } + + var versionedDeleteOptions runtime.Object + var versionedDeleterObject interface{} + deleteReturnsDeletedObject := false + if isGracefulDeleter { + versionedDeleteOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind("DeleteOptions")) + if err != nil { + return nil, nil, err + } + versionedDeleterObject = indirectArbitraryPointer(versionedDeleteOptions) + + if mayReturnFullObjectDeleter, ok := storage.(rest.MayReturnFullObjectDeleter); ok { + deleteReturnsDeletedObject = mayReturnFullObjectDeleter.DeleteReturnsDeletedObject() + } + } + + versionedStatusPtr, err := a.group.Creater.New(optionsExternalVersion.WithKind("Status")) + if err != nil { + return nil, nil, err + } + versionedStatus := indirectArbitraryPointer(versionedStatusPtr) + var ( + getOptions runtime.Object + versionedGetOptions runtime.Object + getOptionsInternalKind schema.GroupVersionKind + getSubpath bool + ) + if isGetterWithOptions { + getOptions, getSubpath, _ = getterWithOptions.NewGetOptions() + getOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(getOptions) + if err != nil { + return nil, nil, err + } + getOptionsInternalKind = getOptionsInternalKinds[0] + versionedGetOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(getOptionsInternalKind.Kind)) + if err != nil { + versionedGetOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(getOptionsInternalKind.Kind)) + if err != nil { + return nil, nil, err + } + } + isGetter = true + } + + var versionedWatchEvent interface{} + if isWatcher { + versionedWatchEventPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind("WatchEvent")) + if err != nil { + return nil, nil, err + } + versionedWatchEvent = indirectArbitraryPointer(versionedWatchEventPtr) + } + + var ( + connectOptions runtime.Object + versionedConnectOptions runtime.Object + connectOptionsInternalKind schema.GroupVersionKind + connectSubpath bool + ) + if isConnecter { + connectOptions, connectSubpath, _ = connecter.NewConnectOptions() + if connectOptions != nil { + connectOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(connectOptions) + if err != nil { + return nil, nil, err + } + + connectOptionsInternalKind = connectOptionsInternalKinds[0] + versionedConnectOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(connectOptionsInternalKind.Kind)) + if err != nil { + versionedConnectOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(connectOptionsInternalKind.Kind)) + if err != nil { + return nil, nil, err + } + } + } + } + + allowWatchList := isWatcher && isLister // watching on lists is allowed only for kinds that support both watch and list. 
+ nameParam := ws.PathParameter("name", "name of the "+kind).DataType("string") + pathParam := ws.PathParameter("path", "path to the resource").DataType("string") + + params := []*restful.Parameter{} + actions := []action{} + + var resourceKind string + kindProvider, ok := storage.(rest.KindProvider) + if ok { + resourceKind = kindProvider.Kind() + } else { + resourceKind = kind + } + + tableProvider, isTableProvider := storage.(rest.TableConvertor) + if isLister && !isTableProvider { + // All listers must implement TableProvider + return nil, nil, fmt.Errorf("%q must implement TableConvertor", resource) + } + + var apiResource metav1.APIResource + if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) && + isStorageVersionProvider && + storageVersionProvider.StorageVersion() != nil { + versioner := storageVersionProvider.StorageVersion() + gvk, err := getStorageVersionKind(versioner, storage, a.group.Typer) + if err != nil { + return nil, nil, err + } + apiResource.StorageVersionHash = discovery.StorageVersionHash(gvk.Group, gvk.Version, gvk.Kind) + } + + // Get the list of actions for the given scope. + switch { + case !namespaceScoped: + // Handle non-namespace scoped resources like nodes. + resourcePath := resource + resourceParams := params + itemPath := resourcePath + "/{name}" + nameParams := append(params, nameParam) + proxyParams := append(nameParams, pathParam) + suffix := "" + if isSubresource { + suffix = "/" + subresource + itemPath = itemPath + suffix + resourcePath = itemPath + resourceParams = nameParams + } + apiResource.Name = path + apiResource.Namespaced = false + apiResource.Kind = resourceKind + namer := handlers.ContextBasedNaming{ + SelfLinker: a.group.Linker, + ClusterScoped: true, + SelfLinkPathPrefix: gpath.Join(a.prefix, resource) + "/", + SelfLinkPathSuffix: suffix, + } + + // Handler for standard REST verbs (GET, PUT, POST and DELETE). + // Add actions at the resource path: /api/apiVersion/resource + actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister) + actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater) + actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList) + + // Add actions at the item path: /api/apiVersion/resource/{name} + actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter) + if getSubpath { + actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter) + } + actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater) + actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher) + actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher) + actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter) + actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath) + default: + namespaceParamName := "namespaces" + // Handler for standard REST verbs (GET, PUT, POST and DELETE). 
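+		// Namespaced resources are registered under namespaces/{namespace}/<resource>; when a
+		// subresource is present, the item path gains a "/<subresource>" suffix and also serves
+		// as the resource path.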
+ namespaceParam := ws.PathParameter("namespace", "object name and auth scope, such as for teams and projects").DataType("string") + namespacedPath := namespaceParamName + "/{namespace}/" + resource + namespaceParams := []*restful.Parameter{namespaceParam} + + resourcePath := namespacedPath + resourceParams := namespaceParams + itemPath := namespacedPath + "/{name}" + nameParams := append(namespaceParams, nameParam) + proxyParams := append(nameParams, pathParam) + itemPathSuffix := "" + if isSubresource { + itemPathSuffix = "/" + subresource + itemPath = itemPath + itemPathSuffix + resourcePath = itemPath + resourceParams = nameParams + } + apiResource.Name = path + apiResource.Namespaced = true + apiResource.Kind = resourceKind + namer := handlers.ContextBasedNaming{ + SelfLinker: a.group.Linker, + ClusterScoped: false, + SelfLinkPathPrefix: gpath.Join(a.prefix, namespaceParamName) + "/", + SelfLinkPathSuffix: itemPathSuffix, + } + + actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister) + actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater) + actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList) + + actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter) + if getSubpath { + actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter) + } + actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater) + actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher) + actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher) + actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter) + actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath) + + // list or post across namespace. + // For ex: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods. 
+ // TODO: more strongly type whether a resource allows these actions on "all namespaces" (bulk delete) + if !isSubresource { + actions = appendIf(actions, action{"LIST", resource, params, namer, true}, isLister) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCHLIST", "watch/" + resource, params, namer, true}, allowWatchList) + } + } + + var resourceInfo *storageversion.ResourceInfo + if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionAPI) && + utilfeature.DefaultFeatureGate.Enabled(features.APIServerIdentity) && + isStorageVersionProvider && + storageVersionProvider.StorageVersion() != nil { + + versioner := storageVersionProvider.StorageVersion() + encodingGVK, err := getStorageVersionKind(versioner, storage, a.group.Typer) + if err != nil { + return nil, nil, err + } + resourceInfo = &storageversion.ResourceInfo{ + GroupResource: schema.GroupResource{ + Group: a.group.GroupVersion.Group, + Resource: apiResource.Name, + }, + EncodingVersion: encodingGVK.GroupVersion().String(), + // We record EquivalentResourceMapper first instead of calculate + // DecodableVersions immediately because API installation must + // be completed first for us to know equivalent APIs + EquivalentResourceMapper: a.group.EquivalentResourceRegistry, + } + } + + // Create Routes for the actions. + // TODO: Add status documentation using Returns() + // Errors (see api/errors/errors.go as well as go-restful router): + // http.StatusNotFound, http.StatusMethodNotAllowed, + // http.StatusUnsupportedMediaType, http.StatusNotAcceptable, + // http.StatusBadRequest, http.StatusUnauthorized, http.StatusForbidden, + // http.StatusRequestTimeout, http.StatusConflict, http.StatusPreconditionFailed, + // http.StatusUnprocessableEntity, http.StatusInternalServerError, + // http.StatusServiceUnavailable + // and api error codes + // Note that if we specify a versioned Status object here, we may need to + // create one for the tests, also + // Success: + // http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent + // + // test/integration/auth_test.go is currently the most comprehensive status code test + + for _, s := range a.group.Serializer.SupportedMediaTypes() { + if len(s.MediaTypeSubType) == 0 || len(s.MediaTypeType) == 0 { + return nil, nil, fmt.Errorf("all serializers in the group Serializer must have MediaTypeType and MediaTypeSubType set: %s", s.MediaType) + } + } + mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer) + allMediaTypes := append(mediaTypes, streamMediaTypes...) + ws.Produces(allMediaTypes...) + + kubeVerbs := map[string]struct{}{} + reqScope := handlers.RequestScope{ + Serializer: a.group.Serializer, + ParameterCodec: a.group.ParameterCodec, + Creater: a.group.Creater, + Convertor: a.group.Convertor, + Defaulter: a.group.Defaulter, + Typer: a.group.Typer, + UnsafeConvertor: a.group.UnsafeConvertor, + Authorizer: a.group.Authorizer, + + EquivalentResourceMapper: a.group.EquivalentResourceRegistry, + + // TODO: Check for the interface on storage + TableConvertor: tableProvider, + + // TODO: This seems wrong for cross-group subresources. It makes an assumption that a subresource and its parent are in the same group version. Revisit this. 
+ Resource: a.group.GroupVersion.WithResource(resource), + Subresource: subresource, + Kind: fqKindToRegister, + + HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal}, + + MetaGroupVersion: metav1.SchemeGroupVersion, + + MaxRequestBodyBytes: a.group.MaxRequestBodyBytes, + } + if a.group.MetaGroupVersion != nil { + reqScope.MetaGroupVersion = *a.group.MetaGroupVersion + } + if a.group.OpenAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + reqScope.FieldManager, err = fieldmanager.NewDefaultFieldManager( + a.group.TypeConverter, + a.group.UnsafeConvertor, + a.group.Defaulter, + a.group.Creater, + fqKindToRegister, + reqScope.HubGroupVersion, + isSubresource, + ) + if err != nil { + return nil, nil, fmt.Errorf("failed to create field manager: %v", err) + } + } + for _, action := range actions { + producedObject := storageMeta.ProducesObject(action.Verb) + if producedObject == nil { + producedObject = defaultVersionedObject + } + reqScope.Namer = action.Namer + + requestScope := "cluster" + var namespaced string + var operationSuffix string + if apiResource.Namespaced { + requestScope = "namespace" + namespaced = "Namespaced" + } + if strings.HasSuffix(action.Path, "/{path:*}") { + requestScope = "resource" + operationSuffix = operationSuffix + "WithPath" + } + if strings.Index(action.Path, "/{name}") != -1 || action.Verb == "POST" { + requestScope = "resource" + } + if action.AllNamespaces { + requestScope = "cluster" + operationSuffix = operationSuffix + "ForAllNamespaces" + namespaced = "" + } + + if kubeVerb, found := toDiscoveryKubeVerb[action.Verb]; found { + if len(kubeVerb) != 0 { + kubeVerbs[kubeVerb] = struct{}{} + } + } else { + return nil, nil, fmt.Errorf("unknown action verb for discovery: %s", action.Verb) + } + + routes := []*restful.RouteBuilder{} + + // If there is a subresource, kind should be the parent's kind. + if isSubresource { + parentStorage, ok := a.group.Storage[resource] + if !ok { + return nil, nil, fmt.Errorf("missing parent storage: %q", resource) + } + + fqParentKind, err := GetResourceKind(a.group.GroupVersion, parentStorage, a.group.Typer) + if err != nil { + return nil, nil, err + } + kind = fqParentKind.Kind + } + + verbOverrider, needOverride := storage.(StorageMetricsOverride) + + // accumulate endpoint-level warnings + var ( + enableWarningHeaders = utilfeature.DefaultFeatureGate.Enabled(features.WarningHeaders) + + warnings []string + deprecated bool + removedRelease string + ) + + { + versionedPtrWithGVK := versionedPtr.DeepCopyObject() + versionedPtrWithGVK.GetObjectKind().SetGroupVersionKind(fqKindToRegister) + currentMajor, currentMinor, _ := deprecation.MajorMinor(versioninfo.Get()) + deprecated = deprecation.IsDeprecated(versionedPtrWithGVK, currentMajor, currentMinor) + if deprecated { + removedRelease = deprecation.RemovedRelease(versionedPtrWithGVK) + warnings = append(warnings, deprecation.WarningMessage(versionedPtrWithGVK)) + } + } + + switch action.Verb { + case "GET": // Get a resource. 
+			var handler restful.RouteFunction
+			if isGetterWithOptions {
+				handler = restfulGetResourceWithOptions(getterWithOptions, reqScope, isSubresource)
+			} else {
+				handler = restfulGetResource(getter, exporter, reqScope)
+			}
+
+			if needOverride {
+				// need change the reported verb
+				handler = metrics.InstrumentRouteFunc(verbOverrider.OverrideMetricsVerb(action.Verb), group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, handler)
+			} else {
+				handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, handler)
+			}
+			if enableWarningHeaders {
+				handler = utilwarning.AddWarningsHandler(handler, warnings)
+			}
+
+			doc := "read the specified " + kind
+			if isSubresource {
+				doc = "read " + subresource + " of the specified " + kind
+			}
+			route := ws.GET(action.Path).To(handler).
+				Doc(doc).
+				Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
+				Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix).
+				Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
+				Returns(http.StatusOK, "OK", producedObject).
+				Writes(producedObject)
+			if isGetterWithOptions {
+				if err := AddObjectParams(ws, route, versionedGetOptions); err != nil {
+					return nil, nil, err
+				}
+			}
+			if isExporter {
+				if err := AddObjectParams(ws, route, versionedExportOptions); err != nil {
+					return nil, nil, err
+				}
+			}
+			addParams(route, action.Params)
+			routes = append(routes, route)
+		case "LIST": // List all resources of a kind.
+			doc := "list objects of kind " + kind
+			if isSubresource {
+				doc = "list " + subresource + " of objects of kind " + kind
+			}
+			handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulListResource(lister, watcher, reqScope, false, a.minRequestTimeout))
+			if enableWarningHeaders {
+				handler = utilwarning.AddWarningsHandler(handler, warnings)
+			}
+			route := ws.GET(action.Path).To(handler).
+				Doc(doc).
+				Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
+				Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix).
+				Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...).
+				Returns(http.StatusOK, "OK", versionedList).
+				Writes(versionedList)
+			if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
+				return nil, nil, err
+			}
+			switch {
+			case isLister && isWatcher:
+				doc := "list or watch objects of kind " + kind
+				if isSubresource {
+					doc = "list or watch " + subresource + " of objects of kind " + kind
+				}
+				route.Doc(doc)
+			case isWatcher:
+				doc := "watch objects of kind " + kind
+				if isSubresource {
+					doc = "watch " + subresource + " of objects of kind " + kind
+				}
+				route.Doc(doc)
+			}
+			addParams(route, action.Params)
+			routes = append(routes, route)
+		case "PUT": // Update a resource.
+			doc := "replace the specified " + kind
+			if isSubresource {
+				doc = "replace " + subresource + " of the specified " + kind
+			}
+			handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulUpdateResource(updater, reqScope, admit))
+			if enableWarningHeaders {
+				handler = utilwarning.AddWarningsHandler(handler, warnings)
+			}
+			route := ws.PUT(action.Path).To(handler).
+				Doc(doc).
+ Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Returns(http.StatusOK, "OK", producedObject). + // TODO: in some cases, the API may return a v1.Status instead of the versioned object + // but currently go-restful can't handle multiple different objects being returned. + Returns(http.StatusCreated, "Created", producedObject). + Reads(defaultVersionedObject). + Writes(producedObject) + if err := AddObjectParams(ws, route, versionedUpdateOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "PATCH": // Partially update a resource + doc := "partially update the specified " + kind + if isSubresource { + doc = "partially update " + subresource + " of the specified " + kind + } + supportedTypes := []string{ + string(types.JSONPatchType), + string(types.MergePatchType), + string(types.StrategicMergePatchType), + } + if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + supportedTypes = append(supportedTypes, string(types.ApplyPatchType)) + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulPatchResource(patcher, reqScope, admit, supportedTypes)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.PATCH(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Consumes(supportedTypes...). + Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Returns(http.StatusOK, "OK", producedObject). + Reads(metav1.Patch{}). + Writes(producedObject) + if err := AddObjectParams(ws, route, versionedPatchOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "POST": // Create a resource. + var handler restful.RouteFunction + if isNamedCreater { + handler = restfulCreateNamedResource(namedCreater, reqScope, admit) + } else { + handler = restfulCreateResource(creater, reqScope, admit) + } + handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, handler) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + article := GetArticleForNoun(kind, " ") + doc := "create" + article + kind + if isSubresource { + doc = "create " + subresource + " of" + article + kind + } + route := ws.POST(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Returns(http.StatusOK, "OK", producedObject). + // TODO: in some cases, the API may return a v1.Status instead of the versioned object + // but currently go-restful can't handle multiple different objects being returned. + Returns(http.StatusCreated, "Created", producedObject). + Returns(http.StatusAccepted, "Accepted", producedObject). + Reads(defaultVersionedObject). 
+ Writes(producedObject) + if err := AddObjectParams(ws, route, versionedCreateOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "DELETE": // Delete a resource. + article := GetArticleForNoun(kind, " ") + doc := "delete" + article + kind + if isSubresource { + doc = "delete " + subresource + " of" + article + kind + } + deleteReturnType := versionedStatus + if deleteReturnsDeletedObject { + deleteReturnType = producedObject + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.DELETE(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Writes(deleteReturnType). + Returns(http.StatusOK, "OK", deleteReturnType). + Returns(http.StatusAccepted, "Accepted", deleteReturnType) + if isGracefulDeleter { + route.Reads(versionedDeleterObject) + route.ParameterNamed("body").Required(false) + if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil { + return nil, nil, err + } + } + addParams(route, action.Params) + routes = append(routes, route) + case "DELETECOLLECTION": + doc := "delete collection of " + kind + if isSubresource { + doc = "delete collection of " + subresource + " of a " + kind + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulDeleteCollection(collectionDeleter, isCollectionDeleter, reqScope, admit)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.DELETE(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Writes(versionedStatus). + Returns(http.StatusOK, "OK", versionedStatus) + if isCollectionDeleter { + route.Reads(versionedDeleterObject) + route.ParameterNamed("body").Required(false) + if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil { + return nil, nil, err + } + } + if err := AddObjectParams(ws, route, versionedListOptions, "watch", "allowWatchBookmarks"); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + // deprecated in 1.11 + case "WATCH": // Watch a resource. + doc := "watch changes to an object of kind " + kind + if isSubresource { + doc = "watch changes to " + subresource + " of an object of kind " + kind + } + doc += ". deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter." 
+ handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.GET(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(allMediaTypes...). + Returns(http.StatusOK, "OK", versionedWatchEvent). + Writes(versionedWatchEvent) + if err := AddObjectParams(ws, route, versionedListOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + // deprecated in 1.11 + case "WATCHLIST": // Watch all resources of a kind. + doc := "watch individual changes to a list of " + kind + if isSubresource { + doc = "watch individual changes to a list of " + subresource + " of " + kind + } + doc += ". deprecated: use the 'watch' parameter with a list operation instead." + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.GET(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix). + Produces(allMediaTypes...). + Returns(http.StatusOK, "OK", versionedWatchEvent). + Writes(versionedWatchEvent) + if err := AddObjectParams(ws, route, versionedListOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "CONNECT": + for _, method := range connecter.ConnectMethods() { + connectProducedObject := storageMeta.ProducesObject(method) + if connectProducedObject == nil { + connectProducedObject = "string" + } + doc := "connect " + method + " requests to " + kind + if isSubresource { + doc = "connect " + method + " requests to " + subresource + " of " + kind + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulConnectResource(connecter, reqScope, admit, path, isSubresource)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.Method(method).Path(action.Path). + To(handler). + Doc(doc). + Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix). + Produces("*/*"). + Consumes("*/*"). 
+ Writes(connectProducedObject) + if versionedConnectOptions != nil { + if err := AddObjectParams(ws, route, versionedConnectOptions); err != nil { + return nil, nil, err + } + } + addParams(route, action.Params) + routes = append(routes, route) + + // transform ConnectMethods to kube verbs + if kubeVerb, found := toDiscoveryKubeVerb[method]; found { + if len(kubeVerb) != 0 { + kubeVerbs[kubeVerb] = struct{}{} + } + } + } + default: + return nil, nil, fmt.Errorf("unrecognized action verb: %s", action.Verb) + } + for _, route := range routes { + route.Metadata(ROUTE_META_GVK, metav1.GroupVersionKind{ + Group: reqScope.Kind.Group, + Version: reqScope.Kind.Version, + Kind: reqScope.Kind.Kind, + }) + route.Metadata(ROUTE_META_ACTION, strings.ToLower(action.Verb)) + ws.Route(route) + } + // Note: update GetAuthorizerAttributes() when adding a custom handler. + } + + apiResource.Verbs = make([]string, 0, len(kubeVerbs)) + for kubeVerb := range kubeVerbs { + apiResource.Verbs = append(apiResource.Verbs, kubeVerb) + } + sort.Strings(apiResource.Verbs) + + if shortNamesProvider, ok := storage.(rest.ShortNamesProvider); ok { + apiResource.ShortNames = shortNamesProvider.ShortNames() + } + if categoriesProvider, ok := storage.(rest.CategoriesProvider); ok { + apiResource.Categories = categoriesProvider.Categories() + } + if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { + gvk := gvkProvider.GroupVersionKind(a.group.GroupVersion) + apiResource.Group = gvk.Group + apiResource.Version = gvk.Version + apiResource.Kind = gvk.Kind + } + + // Record the existence of the GVR and the corresponding GVK + a.group.EquivalentResourceRegistry.RegisterKindFor(reqScope.Resource, reqScope.Subresource, fqKindToRegister) + + return &apiResource, resourceInfo, nil +} + +// indirectArbitraryPointer returns *ptrToObject for an arbitrary pointer +func indirectArbitraryPointer(ptrToObject interface{}) interface{} { + return reflect.Indirect(reflect.ValueOf(ptrToObject)).Interface() +} + +func appendIf(actions []action, a action, shouldAppend bool) []action { + if shouldAppend { + actions = append(actions, a) + } + return actions +} + +func addParams(route *restful.RouteBuilder, params []*restful.Parameter) { + for _, param := range params { + route.Param(param) + } +} + +// AddObjectParams converts a runtime.Object into a set of go-restful Param() definitions on the route. +// The object must be a pointer to a struct; only fields at the top level of the struct that are not +// themselves interfaces or structs are used; only fields with a json tag that is non empty (the standard +// Go JSON behavior for omitting a field) become query parameters. The name of the query parameter is +// the JSON field name. If a description struct tag is set on the field, that description is used on the +// query parameter. In essence, it converts a standard JSON top level object into a query param schema. +func AddObjectParams(ws *restful.WebService, route *restful.RouteBuilder, obj interface{}, excludedNames ...string) error { + sv, err := conversion.EnforcePtr(obj) + if err != nil { + return err + } + st := sv.Type() + excludedNameSet := sets.NewString(excludedNames...) + switch st.Kind() { + case reflect.Struct: + for i := 0; i < st.NumField(); i++ { + name := st.Field(i).Name + sf, ok := st.FieldByName(name) + if !ok { + continue + } + switch sf.Type.Kind() { + case reflect.Interface, reflect.Struct: + case reflect.Ptr: + // TODO: This is a hack to let metav1.Time through. 
This needs to be fixed in a more generic way eventually. bug #36191 + if (sf.Type.Elem().Kind() == reflect.Interface || sf.Type.Elem().Kind() == reflect.Struct) && strings.TrimPrefix(sf.Type.String(), "*") != "metav1.Time" { + continue + } + fallthrough + default: + jsonTag := sf.Tag.Get("json") + if len(jsonTag) == 0 { + continue + } + jsonName := strings.SplitN(jsonTag, ",", 2)[0] + if len(jsonName) == 0 { + continue + } + if excludedNameSet.Has(jsonName) { + continue + } + var desc string + if docable, ok := obj.(documentable); ok { + desc = docable.SwaggerDoc()[jsonName] + } + route.Param(ws.QueryParameter(jsonName, desc).DataType(typeToJSON(sf.Type.String()))) + } + } + } + return nil +} + +// TODO: this is incomplete, expand as needed. +// Convert the name of a golang type to the name of a JSON type +func typeToJSON(typeName string) string { + switch typeName { + case "bool", "*bool": + return "boolean" + case "uint8", "*uint8", "int", "*int", "int32", "*int32", "int64", "*int64", "uint32", "*uint32", "uint64", "*uint64": + return "integer" + case "float64", "*float64", "float32", "*float32": + return "number" + case "metav1.Time", "*metav1.Time": + return "string" + case "byte", "*byte": + return "string" + case "v1.DeletionPropagation", "*v1.DeletionPropagation": + return "string" + case "v1.ResourceVersionMatch", "*v1.ResourceVersionMatch": + return "string" + case "v1.IncludeObjectPolicy", "*v1.IncludeObjectPolicy": + return "string" + + // TODO: Fix these when go-restful supports a way to specify an array query param: + // https://github.com/emicklei/go-restful/issues/225 + case "[]string", "[]*string": + return "string" + case "[]int32", "[]*int32": + return "integer" + + default: + return typeName + } +} + +// defaultStorageMetadata provides default answers to rest.StorageMetadata. +type defaultStorageMetadata struct{} + +// defaultStorageMetadata implements rest.StorageMetadata +var _ rest.StorageMetadata = defaultStorageMetadata{} + +func (defaultStorageMetadata) ProducesMIMETypes(verb string) []string { + return nil +} + +func (defaultStorageMetadata) ProducesObject(verb string) interface{} { + return nil +} + +// splitSubresource checks if the given storage path is the path of a subresource and returns +// the resource and subresource components. +func splitSubresource(path string) (string, string, error) { + var resource, subresource string + switch parts := strings.Split(path, "/"); len(parts) { + case 2: + resource, subresource = parts[0], parts[1] + case 1: + resource = parts[0] + default: + // TODO: support deeper paths + return "", "", fmt.Errorf("api_installer allows only one or two segment paths (resource or resource/subresource)") + } + return resource, subresource, nil +} + +// GetArticleForNoun returns the article needed for the given noun. +func GetArticleForNoun(noun string, padding string) string { + if !strings.HasSuffix(noun, "ss") && strings.HasSuffix(noun, "s") { + // Plurals don't have an article. + // Don't catch words like class + return fmt.Sprintf("%v", padding) + } + + article := "a" + if isVowel(rune(noun[0])) { + article = "an" + } + + return fmt.Sprintf("%s%s%s", padding, article, padding) +} + +// isVowel returns true if the rune is a vowel (case insensitive). 
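// Illustrative sketch (not part of the vendored file): how AddObjectParams and
// typeToJSON above cooperate for a hypothetical options struct. The struct,
// field names and route below are invented for illustration only.
//
//	type FooOptions struct {
//		// Follow becomes query parameter "follow" with swagger type "boolean".
//		Follow bool `json:"follow,omitempty"`
//		// Limit becomes query parameter "limit" with swagger type "integer"
//		// (typeToJSON("*int64") returns "integer").
//		Limit *int64 `json:"limit,omitempty"`
//		// internal has no json tag, so it is skipped.
//		internal string
//	}
//
//	route := ws.GET("/foo").To(handler)
//	if err := AddObjectParams(ws, route, &FooOptions{}); err != nil {
//		return nil, nil, err
//	}
//
// GetArticleForNoun above simply picks an article for generated documentation:
// GetArticleForNoun("Pod", " ") yields " a ", GetArticleForNoun("Ingress", " ")
// yields " an ", and a plural such as "Endpoints" yields just the padding.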
+func isVowel(c rune) bool { + vowels := []rune{'a', 'e', 'i', 'o', 'u'} + for _, value := range vowels { + if value == unicode.ToLower(c) { + return true + } + } + return false +} + +func restfulListResource(r rest.Lister, rw rest.Watcher, scope handlers.RequestScope, forceWatch bool, minRequestTimeout time.Duration) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.ListResource(r, rw, &scope, forceWatch, minRequestTimeout)(res.ResponseWriter, req.Request) + } +} + +func restfulCreateNamedResource(r rest.NamedCreater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.CreateNamedResource(r, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulCreateResource(r rest.Creater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.CreateResource(r, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulDeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.DeleteResource(r, allowsOptions, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulDeleteCollection(r rest.CollectionDeleter, checkBody bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.DeleteCollection(r, checkBody, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulUpdateResource(r rest.Updater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.UpdateResource(r, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, supportedTypes []string) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.PatchResource(r, &scope, admit, supportedTypes)(res.ResponseWriter, req.Request) + } +} + +func restfulGetResource(r rest.Getter, e rest.Exporter, scope handlers.RequestScope) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.GetResource(r, e, &scope)(res.ResponseWriter, req.Request) + } +} + +func restfulGetResourceWithOptions(r rest.GetterWithOptions, scope handlers.RequestScope, isSubresource bool) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.GetResourceWithOptions(r, &scope, isSubresource)(res.ResponseWriter, req.Request) + } +} + +func restfulConnectResource(connecter rest.Connecter, scope handlers.RequestScope, admit admission.Interface, restPath string, isSubresource bool) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.ConnectResource(connecter, &scope, admit, restPath, isSubresource)(res.ResponseWriter, req.Request) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/OWNERS new file mode 100644 index 0000000000..33c780d7a6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- wojtek-t +- jimmidyson diff --git 
a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go new file mode 100644 index 0000000000..d4f6068b40 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -0,0 +1,707 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "bufio" + "net" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "sync" + "time" + + restful "github.com/emicklei/go-restful" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/types" + utilsets "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// resettableCollector is the interface implemented by prometheus.MetricVec +// that can be used by Prometheus to collect metrics and reset their values. +type resettableCollector interface { + compbasemetrics.Registerable + Reset() +} + +const ( + APIServerComponent string = "apiserver" + OtherContentType string = "other" + OtherRequestMethod string = "other" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + deprecatedRequestGauge = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "apiserver_requested_deprecated_apis", + Help: "Gauge of deprecated APIs that have been requested, broken out by API group, version, resource, subresource, and removed_release.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"group", "version", "resource", "subresource", "removed_release"}, + ) + + // TODO(a-robinson): Add unit tests for the handling of these metrics once + // the upstream library supports it. + requestCounter = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_request_total", + Help: "Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response contentType and code.", + StabilityLevel: compbasemetrics.ALPHA, + }, + // The label_name contentType doesn't follow the label_name convention defined here: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/instrumentation.md + // But changing it would break backwards compatibility. Future label_names + // should be all lowercase and separated by underscores. 
+ []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "contentType", "code"}, + ) + longRunningRequestGauge = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "apiserver_longrunning_gauge", + Help: "Gauge of all active long-running apiserver requests broken out by verb, group, version, resource, scope and component. Not all requests are tracked this way.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, + ) + requestLatencies = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "apiserver_request_duration_seconds", + Help: "Response latency distribution in seconds for each verb, dry run value, group, version, resource, subresource, scope and component.", + // This metric is used for verifying api call latencies SLO, + // as well as tracking regressions in this aspects. + // Thus we customize buckets significantly, to empower both usecases. + Buckets: []float64{0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component"}, + ) + responseSizes = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "apiserver_response_sizes", + Help: "Response size distribution in bytes for each group, version, verb, resource, subresource, scope and component.", + // Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB). + Buckets: compbasemetrics.ExponentialBuckets(1000, 10.0, 7), + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, + ) + // DroppedRequests is a number of requests dropped with 'Try again later' response" + DroppedRequests = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_dropped_requests_total", + Help: "Number of requests dropped with 'Try again later' response", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"request_kind"}, + ) + // TLSHandshakeErrors is a number of requests dropped with 'TLS handshake error from' error + TLSHandshakeErrors = compbasemetrics.NewCounter( + &compbasemetrics.CounterOpts{ + Name: "apiserver_tls_handshake_errors_total", + Help: "Number of requests dropped with 'TLS handshake error from' error", + StabilityLevel: compbasemetrics.ALPHA, + }, + ) + // RegisteredWatchers is a number of currently registered watchers splitted by resource. + RegisteredWatchers = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "apiserver_registered_watchers", + Help: "Number of currently registered watchers for a given resources", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"group", "version", "kind"}, + ) + WatchEvents = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_watch_events_total", + Help: "Number of events sent in watch clients", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"group", "version", "kind"}, + ) + WatchEventsSizes = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "apiserver_watch_events_sizes", + Help: "Watch event size distribution in bytes", + Buckets: compbasemetrics.ExponentialBuckets(1024, 2.0, 8), // 1K, 2K, 4K, 8K, ..., 128K. 
+ StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"group", "version", "kind"}, + ) + // Because of volatility of the base metric this is pre-aggregated one. Instead of reporting current usage all the time + // it reports maximal usage during the last second. + currentInflightRequests = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "apiserver_current_inflight_requests", + Help: "Maximal number of currently used inflight request limit of this apiserver per request kind in last second.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"request_kind"}, + ) + currentInqueueRequests = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "apiserver_current_inqueue_requests", + Help: "Maximal number of queued requests in this apiserver per request kind in last second.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"request_kind"}, + ) + + requestTerminationsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_request_terminations_total", + Help: "Number of requests which apiserver terminated in self-defense.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"}, + ) + + apiSelfRequestCounter = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_selfrequest_total", + Help: "Counter of apiserver self-requests broken out for each verb, API resource and subresource.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "resource", "subresource"}, + ) + + requestFilterDuration = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "apiserver_request_filter_duration_seconds", + Help: "Request filter latency distribution in seconds, for each filter type", + Buckets: []float64{0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0, 5.0}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"filter"}, + ) + + // requestAbortsTotal is a number of aborted requests with http.ErrAbortHandler + requestAbortsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_request_aborts_total", + Help: "Number of requests which apiserver aborted possibly due to a timeout, for each group, version, verb, resource, subresource and scope", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "group", "version", "resource", "subresource", "scope"}, + ) + + kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`) + + metrics = []resettableCollector{ + deprecatedRequestGauge, + requestCounter, + longRunningRequestGauge, + requestLatencies, + responseSizes, + DroppedRequests, + TLSHandshakeErrors, + RegisteredWatchers, + WatchEvents, + WatchEventsSizes, + currentInflightRequests, + currentInqueueRequests, + requestTerminationsTotal, + apiSelfRequestCounter, + requestFilterDuration, + requestAbortsTotal, + } + + // these are the known (e.g. whitelisted/known) content types which we will report for + // request metrics. 
Any other RFC compliant content types will be aggregated under 'unknown' + knownMetricContentTypes = utilsets.NewString( + "application/apply-patch+yaml", + "application/json", + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json", + "application/vnd.kubernetes.protobuf", + "application/vnd.kubernetes.protobuf;stream=watch", + "application/yaml", + "text/plain", + "text/plain;charset=utf-8") + // these are the valid request methods which we report in our metrics. Any other request methods + // will be aggregated under 'unknown' + validRequestMethods = utilsets.NewString( + "APPLY", + "CONNECT", + "CREATE", + "DELETE", + "DELETECOLLECTION", + "GET", + "LIST", + "PATCH", + "POST", + "PROXY", + "PUT", + "UPDATE", + "WATCH", + "WATCHLIST") +) + +const ( + // ReadOnlyKind is a string identifying read only request kind + ReadOnlyKind = "readOnly" + // MutatingKind is a string identifying mutating request kind + MutatingKind = "mutating" + + // WaitingPhase is the phase value for a request waiting in a queue + WaitingPhase = "waiting" + // ExecutingPhase is the phase value for an executing request + ExecutingPhase = "executing" +) + +const ( + // deprecatedAnnotationKey is a key for an audit annotation set to + // "true" on requests made to deprecated API versions + deprecatedAnnotationKey = "k8s.io/deprecated" + // removedReleaseAnnotationKey is a key for an audit annotation set to + // the target removal release, in "." format, + // on requests made to deprecated API versions with a target removal release + removedReleaseAnnotationKey = "k8s.io/removed-release" +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + registerMetrics.Do(func() { + for _, metric := range metrics { + legacyregistry.MustRegister(metric) + } + }) +} + +// Reset all metrics. +func Reset() { + for _, metric := range metrics { + metric.Reset() + } +} + +// UpdateInflightRequestMetrics reports concurrency metrics classified by +// mutating vs Readonly. +func UpdateInflightRequestMetrics(phase string, nonmutating, mutating int) { + for _, kc := range []struct { + kind string + count int + }{{ReadOnlyKind, nonmutating}, {MutatingKind, mutating}} { + if phase == ExecutingPhase { + currentInflightRequests.WithLabelValues(kc.kind).Set(float64(kc.count)) + } else { + currentInqueueRequests.WithLabelValues(kc.kind).Set(float64(kc.count)) + } + } +} + +func RecordFilterLatency(name string, elapsed time.Duration) { + requestFilterDuration.WithLabelValues(name).Observe(elapsed.Seconds()) +} + +// RecordRequestAbort records that the request was aborted possibly due to a timeout. +func RecordRequestAbort(req *http.Request, requestInfo *request.RequestInfo) { + if requestInfo == nil { + requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} + } + + scope := CleanScope(requestInfo) + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + resource := requestInfo.Resource + subresource := requestInfo.Subresource + group := requestInfo.APIGroup + version := requestInfo.APIVersion + + requestAbortsTotal.WithLabelValues(reportedVerb, group, version, resource, subresource, scope).Inc() +} + +// RecordRequestTermination records that the request was terminated early as part of a resource +// preservation or apiserver self-defense mechanism (e.g. timeouts, maxinflight throttling, +// proxyHandler errors). RecordRequestTermination should only be called zero or one times +// per request. 
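// Illustrative sketch (not part of the vendored file): typical wiring for the
// collectors above, assuming the legacyregistry Handler helper. Register is
// called once at server start-up; the in-flight/in-queue gauges are refreshed
// periodically by callers in the apiserver filter chain, outside this file.
// "mux", "peakReadOnly", "peakMutating", "queuedReadOnly" and "queuedMutating"
// are placeholder names.
//
//	metrics.Register()
//	mux.Handle("/metrics", legacyregistry.Handler())
//
//	// report the peak concurrency observed during the last second
//	metrics.UpdateInflightRequestMetrics(metrics.ExecutingPhase, peakReadOnly, peakMutating)
//	metrics.UpdateInflightRequestMetrics(metrics.WaitingPhase, queuedReadOnly, queuedMutating)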
+func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInfo, component string, code int) { + if requestInfo == nil { + requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} + } + scope := CleanScope(requestInfo) + + // We don't use verb from , as this may be propagated from + // InstrumentRouteFunc which is registered in installer.go with predefined + // list of verbs (different than those translated to RequestInfo). + // However, we need to tweak it e.g. to differentiate GET from LIST. + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + + if requestInfo.IsResourceRequest { + requestTerminationsTotal.WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() + } else { + requestTerminationsTotal.WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc() + } +} + +// RecordLongRunning tracks the execution of a long running request against the API server. It provides an accurate count +// of the total number of open long running requests. requestInfo may be nil if the caller is not in the normal request flow. +func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, component string, fn func()) { + if requestInfo == nil { + requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} + } + var g compbasemetrics.GaugeMetric + scope := CleanScope(requestInfo) + + // We don't use verb from , as this may be propagated from + // InstrumentRouteFunc which is registered in installer.go with predefined + // list of verbs (different than those translated to RequestInfo). + // However, we need to tweak it e.g. to differentiate GET from LIST. + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + + if requestInfo.IsResourceRequest { + g = longRunningRequestGauge.WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) + } else { + g = longRunningRequestGauge.WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component) + } + g.Inc() + defer g.Dec() + fn() +} + +// MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record +// a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. +func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, contentType string, httpCode, respSize int, elapsed time.Duration) { + // We don't use verb from , as this may be propagated from + // InstrumentRouteFunc which is registered in installer.go with predefined + // list of verbs (different than those translated to RequestInfo). + // However, we need to tweak it e.g. to differentiate GET from LIST. 
+ reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + + dryRun := cleanDryRun(req.URL) + elapsedSeconds := elapsed.Seconds() + cleanContentType := cleanContentType(contentType) + requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, cleanContentType, codeToString(httpCode)).Inc() + // MonitorRequest happens after authentication, so we can trust the username given by the request + info, ok := request.UserFrom(req.Context()) + if ok && info.GetName() == user.APIServerUser { + apiSelfRequestCounter.WithLabelValues(reportedVerb, resource, subresource).Inc() + } + if deprecated { + deprecatedRequestGauge.WithLabelValues(group, version, resource, subresource, removedRelease).Set(1) + audit.AddAuditAnnotation(req.Context(), deprecatedAnnotationKey, "true") + if len(removedRelease) > 0 { + audit.AddAuditAnnotation(req.Context(), removedReleaseAnnotationKey, removedRelease) + } + } + requestLatencies.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component).Observe(elapsedSeconds) + // We are only interested in response sizes of read requests. + if verb == "GET" || verb == "LIST" { + responseSizes.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(float64(respSize)) + } +} + +// InstrumentRouteFunc works like Prometheus' InstrumentHandlerFunc but wraps +// the go-restful RouteFunction instead of a HandlerFunc plus some Kubernetes endpoint specific information. +func InstrumentRouteFunc(verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, routeFunc restful.RouteFunction) restful.RouteFunction { + return restful.RouteFunction(func(req *restful.Request, response *restful.Response) { + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(req.Request.Context()) + if !ok { + requestReceivedTimestamp = time.Now() + } + + delegate := &ResponseWriterDelegator{ResponseWriter: response.ResponseWriter} + + _, cn := response.ResponseWriter.(http.CloseNotifier) + _, fl := response.ResponseWriter.(http.Flusher) + _, hj := response.ResponseWriter.(http.Hijacker) + var rw http.ResponseWriter + if cn && fl && hj { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + response.ResponseWriter = rw + + routeFunc(req, response) + + MonitorRequest(req.Request, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(requestReceivedTimestamp)) + }) +} + +// InstrumentHandlerFunc works like Prometheus' InstrumentHandlerFunc but adds some Kubernetes endpoint specific information. 
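// Illustrative sketch (not part of the vendored file): InstrumentHandlerFunc
// below is the plain net/http counterpart of InstrumentRouteFunc above. A
// non-restful endpoint could be wrapped like this ("listPods" and "mux" are
// placeholder names):
//
//	var listPods http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { /* ... */ }
//	instrumented := metrics.InstrumentHandlerFunc(
//		"LIST", "", "v1", "pods", "", "cluster", metrics.APIServerComponent,
//		false /* deprecated */, "" /* removedRelease */, listPods)
//	mux.Handle("/api/v1/pods", instrumented)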
+func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, handler http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(req.Context()) + if !ok { + requestReceivedTimestamp = time.Now() + } + + delegate := &ResponseWriterDelegator{ResponseWriter: w} + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + if cn && fl && hj { + w = &fancyResponseWriterDelegator{delegate} + } else { + w = delegate + } + + handler(w, req) + + MonitorRequest(req, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(requestReceivedTimestamp)) + } +} + +// cleanContentType binds the contentType (for metrics related purposes) to a +// bounded set of known/expected content-types. +func cleanContentType(contentType string) string { + normalizedContentType := strings.ToLower(contentType) + if strings.HasSuffix(contentType, " stream=watch") || strings.HasSuffix(contentType, " charset=utf-8") { + normalizedContentType = strings.ReplaceAll(contentType, " ", "") + } + if knownMetricContentTypes.Has(normalizedContentType) { + return normalizedContentType + } + return OtherContentType +} + +// CleanScope returns the scope of the request. +func CleanScope(requestInfo *request.RequestInfo) string { + if requestInfo.Namespace != "" { + return "namespace" + } + if requestInfo.Name != "" { + return "resource" + } + if requestInfo.IsResourceRequest { + return "cluster" + } + // this is the empty scope + return "" +} + +func canonicalVerb(verb string, scope string) string { + switch verb { + case "GET", "HEAD": + if scope != "resource" && scope != "" { + return "LIST" + } + return "GET" + default: + return verb + } +} + +func cleanVerb(verb string, request *http.Request) string { + reportedVerb := verb + if verb == "LIST" { + // see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool + if values := request.URL.Query()["watch"]; len(values) > 0 { + if value := strings.ToLower(values[0]); value != "0" && value != "false" { + reportedVerb = "WATCH" + } + } + } + // normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics + if verb == "WATCHLIST" { + reportedVerb = "WATCH" + } + if verb == "PATCH" && request.Header.Get("Content-Type") == string(types.ApplyPatchType) && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + reportedVerb = "APPLY" + } + if validRequestMethods.Has(reportedVerb) { + return reportedVerb + } + return OtherRequestMethod +} + +func cleanDryRun(u *url.URL) string { + // avoid allocating when we don't see dryRun in the query + if !strings.Contains(u.RawQuery, "dryRun") { + return "" + } + dryRun := u.Query()["dryRun"] + if errs := validation.ValidateDryRun(nil, dryRun); len(errs) > 0 { + return "invalid" + } + // Since dryRun could be valid with any arbitrarily long length + // we have to dedup and sort the elements before joining them together + // TODO: this is a fairly large allocation for what it does, consider + // a sort and dedup in a single pass + return strings.Join(utilsets.NewString(dryRun...).List(), ",") +} + +// ResponseWriterDelegator interface wraps http.ResponseWriter to additionally record content-length, status-code, etc. 
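// Illustrative sketch (not part of the vendored file): expected results of the
// normalization helpers above, read off the code. "req", "reqWithQuery" and
// "mustParseURL" are hypothetical test helpers.
//
//	CleanScope(&request.RequestInfo{Namespace: "default"})      // "namespace"
//	CleanScope(&request.RequestInfo{Name: "foo"})               // "resource"
//	CleanScope(&request.RequestInfo{IsResourceRequest: true})   // "cluster"
//
//	canonicalVerb("GET", "cluster")                             // "LIST" (collection read)
//	cleanVerb("LIST", reqWithQuery("watch=true"))               // "WATCH"
//	cleanVerb("WATCHLIST", req)                                 // "WATCH" (legacy verb normalized)
//
//	cleanDryRun(mustParseURL("/api/v1/pods?dryRun=All&dryRun=All")) // "All" (deduplicated)
//	cleanDryRun(mustParseURL("/api/v1/pods?dryRun=bogus"))          // "invalid"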
+type ResponseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool +} + +func (r *ResponseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *ResponseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +func (r *ResponseWriterDelegator) Status() int { + return r.status +} + +func (r *ResponseWriterDelegator) ContentLength() int { + return int(r.written) +} + +type fancyResponseWriterDelegator struct { + *ResponseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +// Small optimization over Itoa +func codeToString(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS new file mode 100644 index 0000000000..006f0125e1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- mbohlool diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go new file mode 100644 index 0000000000..e3bd028bbf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go @@ -0,0 +1,191 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "unicode" + + restful "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util" +) + +var verbs = util.NewTrie([]string{"get", "log", "read", "replace", "patch", "delete", "deletecollection", "watch", "connect", "proxy", "list", "create", "patch"}) + +const ( + extensionGVK = "x-kubernetes-group-version-kind" +) + +// ToValidOperationID makes an string a valid op ID (e.g. removing punctuations and whitespaces and make it camel case) +func ToValidOperationID(s string, capitalizeFirstLetter bool) string { + var buffer bytes.Buffer + capitalize := capitalizeFirstLetter + for i, r := range s { + if unicode.IsLetter(r) || r == '_' || (i != 0 && unicode.IsDigit(r)) { + if capitalize { + buffer.WriteRune(unicode.ToUpper(r)) + capitalize = false + } else { + buffer.WriteRune(r) + } + } else { + capitalize = true + } + } + return buffer.String() +} + +// GetOperationIDAndTags returns a customize operation ID and a list of tags for kubernetes API server's OpenAPI spec to prevent duplicate IDs. +func GetOperationIDAndTags(r *restful.Route) (string, []string, error) { + op := r.Operation + path := r.Path + var tags []string + prefix, exists := verbs.GetPrefix(op) + if !exists { + return op, tags, fmt.Errorf("operation names should start with a verb. Cannot determine operation verb from %v", op) + } + op = op[len(prefix):] + parts := strings.Split(strings.Trim(path, "/"), "/") + // Assume /api is /apis/core, remove this when we actually server /api/... on /apis/core/... + if len(parts) >= 1 && parts[0] == "api" { + parts = append([]string{"apis", "core"}, parts[1:]...) + } + if len(parts) >= 2 && parts[0] == "apis" { + trimmed := strings.TrimSuffix(parts[1], ".k8s.io") + prefix = prefix + ToValidOperationID(trimmed, prefix != "") + tag := ToValidOperationID(trimmed, false) + if len(parts) > 2 { + prefix = prefix + ToValidOperationID(parts[2], prefix != "") + tag = tag + "_" + ToValidOperationID(parts[2], false) + } + tags = append(tags, tag) + } else if len(parts) >= 1 { + tags = append(tags, ToValidOperationID(parts[0], false)) + } + return prefix + ToValidOperationID(op, prefix != ""), tags, nil +} + +type groupVersionKinds []v1.GroupVersionKind + +func (s groupVersionKinds) Len() int { + return len(s) +} + +func (s groupVersionKinds) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s groupVersionKinds) Less(i, j int) bool { + if s[i].Group == s[j].Group { + if s[i].Version == s[j].Version { + return s[i].Kind < s[j].Kind + } + return s[i].Version < s[j].Version + } + return s[i].Group < s[j].Group +} + +func (s groupVersionKinds) JSON() []interface{} { + j := []interface{}{} + for _, gvk := range s { + j = append(j, map[string]interface{}{ + "group": gvk.Group, + "version": gvk.Version, + "kind": gvk.Kind, + }) + } + return j +} + +// DefinitionNamer is the type to customize OpenAPI definition name. 
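// Illustrative sketch (not part of the vendored file): behaviour of the helpers
// above, traced from the code.
//
//	ToValidOperationID("get-pods", true)         // "GetPods"
//	ToValidOperationID("storage.k8s.io", false)  // "storageK8sIo"
//
// For a route registered at /apis/apps/v1/namespaces/{namespace}/deployments
// with Operation "listNamespacedDeployment", GetOperationIDAndTags returns the
// operation ID "listAppsV1NamespacedDeployment" and the tag "apps_v1".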
+type DefinitionNamer struct { + typeGroupVersionKinds map[string]groupVersionKinds +} + +func gvkConvert(gvk schema.GroupVersionKind) v1.GroupVersionKind { + return v1.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + Kind: gvk.Kind, + } +} + +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} + +func typeName(t reflect.Type) string { + path := t.PkgPath() + if strings.Contains(path, "/vendor/") { + path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] + } + return fmt.Sprintf("%s.%s", path, t.Name()) +} + +// NewDefinitionNamer constructs a new DefinitionNamer to be used to customize OpenAPI spec. +func NewDefinitionNamer(schemes ...*runtime.Scheme) *DefinitionNamer { + ret := &DefinitionNamer{ + typeGroupVersionKinds: map[string]groupVersionKinds{}, + } + for _, s := range schemes { + for gvk, rtype := range s.AllKnownTypes() { + newGVK := gvkConvert(gvk) + exists := false + for _, existingGVK := range ret.typeGroupVersionKinds[typeName(rtype)] { + if newGVK == existingGVK { + exists = true + break + } + } + if !exists { + ret.typeGroupVersionKinds[typeName(rtype)] = append(ret.typeGroupVersionKinds[typeName(rtype)], newGVK) + } + } + } + for _, gvk := range ret.typeGroupVersionKinds { + sort.Sort(gvk) + } + return ret +} + +// GetDefinitionName returns the name and tags for a given definition +func (d *DefinitionNamer) GetDefinitionName(name string) (string, spec.Extensions) { + if groupVersionKinds, ok := d.typeGroupVersionKinds[name]; ok { + return friendlyName(name), spec.Extensions{ + extensionGVK: groupVersionKinds.JSON(), + } + } + return friendlyName(name), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS new file mode 100644 index 0000000000..4da107c8c3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sttts diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go new file mode 100644 index 0000000000..fe3ae38edc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go @@ -0,0 +1,96 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" +) + +// The key type is unexported to prevent collisions +type key int + +const ( + // namespaceKey is the context key for the request namespace. 
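// Illustrative sketch (not part of the vendored file): friendlyName above
// reverses the domain portion of a Go type path so definitions group naturally
// in the OpenAPI spec, and GetDefinitionName attaches the
// x-kubernetes-group-version-kind extension for types known to the supplied
// schemes. "scheme" below is an assumed *runtime.Scheme with core/v1 registered.
//
//	friendlyName("k8s.io/api/core/v1.Pod")
//	// "io.k8s.api.core.v1.Pod"
//
//	namer := NewDefinitionNamer(scheme)
//	name, ext := namer.GetDefinitionName("k8s.io/api/core/v1.Pod")
//	// name == "io.k8s.api.core.v1.Pod"
//	// ext["x-kubernetes-group-version-kind"] includes {group:"", version:"v1", kind:"Pod"}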
+ namespaceKey key = iota + + // userKey is the context key for the request user. + userKey + + // auditKey is the context key for the audit event. + auditKey + + // audiencesKey is the context key for request audiences. + audiencesKey +) + +// NewContext instantiates a base context object for request flows. +func NewContext() context.Context { + return context.TODO() +} + +// NewDefaultContext instantiates a base context object for request flows in the default namespace +func NewDefaultContext() context.Context { + return WithNamespace(NewContext(), metav1.NamespaceDefault) +} + +// WithValue returns a copy of parent in which the value associated with key is val. +func WithValue(parent context.Context, key interface{}, val interface{}) context.Context { + return context.WithValue(parent, key, val) +} + +// WithNamespace returns a copy of parent in which the namespace value is set +func WithNamespace(parent context.Context, namespace string) context.Context { + return WithValue(parent, namespaceKey, namespace) +} + +// NamespaceFrom returns the value of the namespace key on the ctx +func NamespaceFrom(ctx context.Context) (string, bool) { + namespace, ok := ctx.Value(namespaceKey).(string) + return namespace, ok +} + +// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none +func NamespaceValue(ctx context.Context) string { + namespace, _ := NamespaceFrom(ctx) + return namespace +} + +// WithUser returns a copy of parent in which the user value is set +func WithUser(parent context.Context, user user.Info) context.Context { + return WithValue(parent, userKey, user) +} + +// UserFrom returns the value of the user key on the ctx +func UserFrom(ctx context.Context) (user.Info, bool) { + user, ok := ctx.Value(userKey).(user.Info) + return user, ok +} + +// WithAuditEvent returns set audit event struct. +func WithAuditEvent(parent context.Context, ev *audit.Event) context.Context { + return WithValue(parent, auditKey, ev) +} + +// AuditEventFrom returns the audit event struct on the ctx +func AuditEventFrom(ctx context.Context) *audit.Event { + ev, _ := ctx.Value(auditKey).(*audit.Event) + return ev +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go new file mode 100644 index 0000000000..96da6f2b9c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package request contains everything around extracting info from +// a http request object. +// TODO: this package is temporary. 
Handlers must move into pkg/apiserver/handlers to avoid dependency cycle +package request // import "k8s.io/apiserver/pkg/endpoints/request" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go new file mode 100644 index 0000000000..7d58cf3ad9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + "time" +) + +type requestReceivedTimestampKeyType int + +// requestReceivedTimestampKey is the ReceivedTimestamp (the time the request reached the apiserver) +// key for the context. +const requestReceivedTimestampKey requestReceivedTimestampKeyType = iota + +// WithReceivedTimestamp returns a copy of parent context in which the ReceivedTimestamp +// (the time the request reached the apiserver) is set. +// +// If the specified ReceivedTimestamp is zero, no value is set and the parent context is returned as is. +func WithReceivedTimestamp(parent context.Context, receivedTimestamp time.Time) context.Context { + if receivedTimestamp.IsZero() { + return parent + } + return WithValue(parent, requestReceivedTimestampKey, receivedTimestamp) +} + +// ReceivedTimestampFrom returns the value of the ReceivedTimestamp key from the specified context. +func ReceivedTimestampFrom(ctx context.Context) (time.Time, bool) { + info, ok := ctx.Value(requestReceivedTimestampKey).(time.Time) + return info, ok +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go new file mode 100644 index 0000000000..1f5dc28a9e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -0,0 +1,274 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + "fmt" + "net/http" + "strings" + + "k8s.io/apimachinery/pkg/api/validation/path" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/klog/v2" +) + +// LongRunningRequestCheck is a predicate which is true for long-running http requests. 
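// Illustrative sketch (not part of the vendored file): the context helpers in
// this package all follow the same pattern, an unexported key type plus
// With.../...From accessors. A filter early in the chain can stamp the arrival
// time and a later consumer read it back (the surrounding filter plumbing is
// omitted):
//
//	ctx := request.WithReceivedTimestamp(req.Context(), time.Now())
//	req = req.WithContext(ctx)
//	// ...later, e.g. when recording request latency:
//	if received, ok := request.ReceivedTimestampFrom(req.Context()); ok {
//		elapsed := time.Since(received)
//		_ = elapsed
//	}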
+type LongRunningRequestCheck func(r *http.Request, requestInfo *RequestInfo) bool + +type RequestInfoResolver interface { + NewRequestInfo(req *http.Request) (*RequestInfo, error) +} + +// RequestInfo holds information parsed from the http.Request +type RequestInfo struct { + // IsResourceRequest indicates whether or not the request is for an API resource or subresource + IsResourceRequest bool + // Path is the URL path of the request + Path string + // Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch. + // for non-resource requests, this is the lowercase http verb + Verb string + + APIPrefix string + APIGroup string + APIVersion string + Namespace string + // Resource is the name of the resource being requested. This is not the kind. For example: pods + Resource string + // Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. + // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" + // (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding". + Subresource string + // Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in. + Name string + // Parts are the path parts for the request, always starting with /{resource}/{name} + Parts []string +} + +// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal +// CRUDdy GET/POST/PUT/DELETE actions on REST objects. +// TODO: find a way to keep this up to date automatically. Maybe dynamically populate list as handlers added to +// master's Mux. +var specialVerbs = sets.NewString("proxy", "watch") + +// specialVerbsNoSubresources contains root verbs which do not allow subresources +var specialVerbsNoSubresources = sets.NewString("proxy") + +// namespaceSubresources contains subresources of namespace +// this list allows the parser to distinguish between a namespace subresource, and a namespaced resource +var namespaceSubresources = sets.NewString("status", "finalize") + +// NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift +var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...) + +type RequestInfoFactory struct { + APIPrefixes sets.String // without leading and trailing slashes + GrouplessAPIPrefixes sets.String // without leading and trailing slashes +} + +// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses +// NewRequestInfo returns the information from the http request. If error is not nil, RequestInfo holds the information as best it is known before the failure +// It handles both resource and non-resource requests and fills in all the pertinent information for each. 
+// Valid Inputs: +// Resource paths +// /apis/{api-group}/{version}/namespaces +// /api/{version}/namespaces +// /api/{version}/namespaces/{namespace} +// /api/{version}/namespaces/{namespace}/{resource} +// /api/{version}/namespaces/{namespace}/{resource}/{resourceName} +// /api/{version}/{resource} +// /api/{version}/{resource}/{resourceName} +// +// Special verbs without subresources: +// /api/{version}/proxy/{resource}/{resourceName} +// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName} +// +// Special verbs with subresources: +// /api/{version}/watch/{resource} +// /api/{version}/watch/namespaces/{namespace}/{resource} +// +// NonResource paths +// /apis/{api-group}/{version} +// /apis/{api-group} +// /apis +// /api/{version} +// /api +// /healthz +// / +func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, error) { + // start with a non-resource request until proven otherwise + requestInfo := RequestInfo{ + IsResourceRequest: false, + Path: req.URL.Path, + Verb: strings.ToLower(req.Method), + } + + currentParts := splitPath(req.URL.Path) + if len(currentParts) < 3 { + // return a non-resource request + return &requestInfo, nil + } + + if !r.APIPrefixes.Has(currentParts[0]) { + // return a non-resource request + return &requestInfo, nil + } + requestInfo.APIPrefix = currentParts[0] + currentParts = currentParts[1:] + + if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) { + // one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?" + if len(currentParts) < 3 { + // return a non-resource request + return &requestInfo, nil + } + + requestInfo.APIGroup = currentParts[0] + currentParts = currentParts[1:] + } + + requestInfo.IsResourceRequest = true + requestInfo.APIVersion = currentParts[0] + currentParts = currentParts[1:] + + // handle input of form /{specialVerb}/* + if specialVerbs.Has(currentParts[0]) { + if len(currentParts) < 2 { + return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL) + } + + requestInfo.Verb = currentParts[0] + currentParts = currentParts[1:] + + } else { + switch req.Method { + case "POST": + requestInfo.Verb = "create" + case "GET", "HEAD": + requestInfo.Verb = "get" + case "PUT": + requestInfo.Verb = "update" + case "PATCH": + requestInfo.Verb = "patch" + case "DELETE": + requestInfo.Verb = "delete" + default: + requestInfo.Verb = "" + } + } + + // URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind + if currentParts[0] == "namespaces" { + if len(currentParts) > 1 { + requestInfo.Namespace = currentParts[1] + + // if there is another step after the namespace name and it is not a known namespace subresource + // move currentParts to include it as a resource in its own right + if len(currentParts) > 2 && !namespaceSubresources.Has(currentParts[2]) { + currentParts = currentParts[2:] + } + } + } else { + requestInfo.Namespace = metav1.NamespaceNone + } + + // parsing successful, so we now know the proper value for .Parts + requestInfo.Parts = currentParts + + // parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret + switch { + case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb): + requestInfo.Subresource = requestInfo.Parts[2] + fallthrough + case len(requestInfo.Parts) >= 2: + requestInfo.Name = requestInfo.Parts[1] + fallthrough + case len(requestInfo.Parts) >= 1: + requestInfo.Resource = requestInfo.Parts[0] + } + + // if there's no 
name on the request and we thought it was a get before, then the actual verb is a list or a watch + if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" { + opts := metainternalversion.ListOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { + // An error in parsing request will result in default to "list" and not setting "name" field. + klog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) + // Reset opts to not rely on partial results from parsing. + // However, if watch is set, let's report it. + opts = metainternalversion.ListOptions{} + if values := req.URL.Query()["watch"]; len(values) > 0 { + switch strings.ToLower(values[0]) { + case "false", "0": + default: + opts.Watch = true + } + } + } + + if opts.Watch { + requestInfo.Verb = "watch" + } else { + requestInfo.Verb = "list" + } + + if opts.FieldSelector != nil { + if name, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok { + if len(path.IsValidPathSegmentName(name)) == 0 { + requestInfo.Name = name + } + } + } + } + // if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection + if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" { + requestInfo.Verb = "deletecollection" + } + + return &requestInfo, nil +} + +type requestInfoKeyType int + +// requestInfoKey is the RequestInfo key for the context. It's of private type here. Because +// keys are interfaces and interfaces are equal when the type and the value is equal, this +// does not conflict with the keys defined in pkg/api. +const requestInfoKey requestInfoKeyType = iota + +// WithRequestInfo returns a copy of parent in which the request info value is set +func WithRequestInfo(parent context.Context, info *RequestInfo) context.Context { + return WithValue(parent, requestInfoKey, info) +} + +// RequestInfoFrom returns the value of the RequestInfo key on the ctx +func RequestInfoFrom(ctx context.Context) (*RequestInfo, bool) { + info, ok := ctx.Value(requestInfoKey).(*RequestInfo) + return info, ok +} + +// splitPath returns the segments for a URL path. +func splitPath(path string) []string { + path = strings.Trim(path, "/") + if path == "" { + return []string{} + } + return strings.Split(path, "/") +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go b/vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go new file mode 100644 index 0000000000..c0dde6e06d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package warning + +import ( + restful "github.com/emicklei/go-restful" + + "k8s.io/apiserver/pkg/warning" +) + +// AddWarningsHandler returns a handler that adds the provided warnings to all requests, +// then delegates to the provided handler. 
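// Illustrative sketch (not part of the vendored file): a worked example for
// NewRequestInfo above, assuming the factory is configured with
// APIPrefixes={"api","apis"} and GrouplessAPIPrefixes={"api"} (the usual
// apiserver wiring, set up outside this file). For
//
//	GET /apis/apps/v1/namespaces/default/deployments/foo/status
//
// the parser produces (Path omitted):
//
//	RequestInfo{
//		IsResourceRequest: true,
//		Verb:              "get",
//		APIPrefix:         "apis",
//		APIGroup:          "apps",
//		APIVersion:        "v1",
//		Namespace:         "default",
//		Resource:          "deployments",
//		Subresource:       "status",
//		Name:              "foo",
//		Parts:             []string{"deployments", "foo", "status"},
//	}
//
// The same GET against the bare collection (no name) is reported with Verb
// "list", or "watch" when ?watch=true is present.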
+func AddWarningsHandler(handler restful.RouteFunction, warnings []string) restful.RouteFunction { + if len(warnings) == 0 { + return handler + } + + return func(req *restful.Request, res *restful.Response) { + ctx := req.Request.Context() + for _, msg := range warnings { + warning.AddWarning(ctx, "", msg) + } + handler(req, res) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/features/OWNERS b/vendor/k8s.io/apiserver/pkg/features/OWNERS new file mode 100644 index 0000000000..05b08249ae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/features/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- feature-approvers diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go new file mode 100644 index 0000000000..612be9845a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -0,0 +1,190 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + "k8s.io/apimachinery/pkg/util/runtime" + + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" +) + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature() bool + + // owner: @tallclair + // alpha: v1.5 + // beta: v1.6 + // deprecated: v1.18 + // + // StreamingProxyRedirects controls whether the apiserver should intercept (and follow) + // redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). + // + // This feature is deprecated, and will be removed in v1.22. + StreamingProxyRedirects featuregate.Feature = "StreamingProxyRedirects" + + // owner: @tallclair + // alpha: v1.12 + // beta: v1.14 + // + // ValidateProxyRedirects controls whether the apiserver should validate that redirects are only + // followed to the same host. Only used if StreamingProxyRedirects is enabled. + ValidateProxyRedirects featuregate.Feature = "ValidateProxyRedirects" + + // owner: @tallclair + // alpha: v1.7 + // beta: v1.8 + // GA: v1.12 + // + // AdvancedAuditing enables a much more general API auditing pipeline, which includes support for + // pluggable output backends and an audit policy specifying how different requests should be + // audited. + AdvancedAuditing featuregate.Feature = "AdvancedAuditing" + + // owner: @ilackams + // alpha: v1.7 + // beta: v1.16 + // + // Enables compression of REST responses (GET and LIST only) + APIResponseCompression featuregate.Feature = "APIResponseCompression" + + // owner: @smarterclayton + // alpha: v1.8 + // beta: v1.9 + // + // Allow API clients to retrieve resource lists in chunks rather than + // all at once. + APIListChunking featuregate.Feature = "APIListChunking" + + // owner: @apelisse + // alpha: v1.12 + // beta: v1.13 + // stable: v1.18 + // + // Allow requests to be processed but not stored, so that + // validation, merging, mutation can be tested without + // committing. 
+ DryRun featuregate.Feature = "DryRun" + + // owner: @caesarxuchao + // alpha: v1.15 + // beta: v1.16 + // + // Allow apiservers to show a count of remaining items in the response + // to a chunking list request. + RemainingItemCount featuregate.Feature = "RemainingItemCount" + + // owner: @apelisse, @lavalamp + // alpha: v1.14 + // beta: v1.16 + // + // Server-side apply. Merging happens on the server. + ServerSideApply featuregate.Feature = "ServerSideApply" + + // owner: @caesarxuchao + // alpha: v1.14 + // beta: v1.15 + // + // Allow apiservers to expose the storage version hash in the discovery + // document. + StorageVersionHash featuregate.Feature = "StorageVersionHash" + + // owner: @caesarxuchao @roycaihw + // alpha: v1.20 + // + // Enable the storage version API. + StorageVersionAPI featuregate.Feature = "StorageVersionAPI" + + // owner: @wojtek-t + // alpha: v1.15 + // beta: v1.16 + // GA: v1.17 + // + // Enables support for watch bookmark events. + WatchBookmark featuregate.Feature = "WatchBookmark" + + // owner: @MikeSpreitzer @yue9944882 + // alpha: v1.15 + // + // + // Enables managing request concurrency with prioritization and fairness at each server + APIPriorityAndFairness featuregate.Feature = "APIPriorityAndFairness" + + // owner: @wojtek-t + // alpha: v1.16 + // beta: v1.20 + // + // Deprecates and removes SelfLink from ObjectMeta and ListMeta. + RemoveSelfLink featuregate.Feature = "RemoveSelfLink" + + // owner: @shaloulcy, @wojtek-t + // alpha: v1.18 + // beta: v1.19 + // GA: v1.20 + // + // Allows label and field based indexes in apiserver watch cache to accelerate list operations. + SelectorIndex featuregate.Feature = "SelectorIndex" + + // owner: @liggitt + // beta: v1.19 + // + // Allows sending warning headers in API responses. + WarningHeaders featuregate.Feature = "WarningHeaders" + + // owner: @wojtek-t + // alpha: v1.20 + // + // Allows for updating watchcache resource version with progress notify events. + EfficientWatchResumption featuregate.Feature = "EfficientWatchResumption" + + // owner: @roycaihw + // alpha: v1.20 + // + // Assigns each kube-apiserver an ID in a cluster. + APIServerIdentity featuregate.Feature = "APIServerIdentity" +) + +func init() { + runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. 
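A minimal sketch of how calling code typically consults one of the gate constants above through the read-only DefaultFeatureGate; the gate chosen here is illustrative, and the map that follows assigns each gate its default value and maturity:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// DefaultFeatureGate is the read-only view of DefaultMutableFeatureGate,
	// which the init() above populates from defaultKubernetesFeatureGates.
	if utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) {
		fmt.Println("APIListChunking is enabled")
	}
	// Tests or the --feature-gates flag flip gates through the mutable
	// registry, e.g. utilfeature.DefaultMutableFeatureGate.Set("APIPriorityAndFairness=false").
}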
+var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + StreamingProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated}, + ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Beta}, + AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, + APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, + APIListChunking: {Default: true, PreRelease: featuregate.Beta}, + DryRun: {Default: true, PreRelease: featuregate.GA}, + RemainingItemCount: {Default: true, PreRelease: featuregate.Beta}, + ServerSideApply: {Default: true, PreRelease: featuregate.Beta}, + StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, + StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, + WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, + RemoveSelfLink: {Default: true, PreRelease: featuregate.Beta}, + SelectorIndex: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WarningHeaders: {Default: true, PreRelease: featuregate.Beta}, + EfficientWatchResumption: {Default: false, PreRelease: featuregate.Alpha}, + APIServerIdentity: {Default: false, PreRelease: featuregate.Alpha}, +} diff --git a/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS b/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS new file mode 100644 index 0000000000..d812b5d3ee --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- deads2k +- derekwaynecarr +- vishh +reviewers: +- deads2k +- derekwaynecarr +- smarterclayton +- vishh +labels: +- sig/api-machinery diff --git a/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go b/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go new file mode 100644 index 0000000000..15f8b7613d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go @@ -0,0 +1,88 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/tools/cache" +) + +// UsageStatsOptions is an options structs that describes how stats should be calculated +type UsageStatsOptions struct { + // Namespace where stats should be calculate + Namespace string + // Scopes that must match counted objects + Scopes []corev1.ResourceQuotaScope + // Resources are the set of resources to include in the measurement + Resources []corev1.ResourceName + ScopeSelector *corev1.ScopeSelector +} + +// UsageStats is result of measuring observed resource use in the system +type UsageStats struct { + // Used maps resource to quantity used + Used corev1.ResourceList +} + +// Evaluator knows how to evaluate quota usage for a particular group resource +type Evaluator interface { + // Constraints ensures that each required resource is present on item + Constraints(required []corev1.ResourceName, item runtime.Object) error + // GroupResource returns the groupResource that this object knows how to evaluate + GroupResource() schema.GroupResource + // Handles determines if quota could be impacted by the specified attribute. + // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. + Handles(operation admission.Attributes) bool + // Matches returns true if the specified quota matches the input item + Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) + // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object. + MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) + // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope + UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) + // MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches. + MatchingResources(input []corev1.ResourceName) []corev1.ResourceName + // Usage returns the resource usage for the specified object + Usage(item runtime.Object) (corev1.ResourceList, error) + // UsageStats calculates latest observed usage stats for all objects + UsageStats(options UsageStatsOptions) (UsageStats, error) +} + +// Configuration defines how the quota system is configured. +type Configuration interface { + // IgnoredResources are ignored by quota. + IgnoredResources() map[schema.GroupResource]struct{} + // Evaluators for quota evaluation. 
+ Evaluators() []Evaluator +} + +// Registry maintains a list of evaluators +type Registry interface { + // Add to registry + Add(e Evaluator) + // Remove from registry + Remove(e Evaluator) + // Get by group resource + Get(gr schema.GroupResource) Evaluator + // List from registry + List() []Evaluator +} + +// ListerForResourceFunc knows how to get a lister for a specific resource +type ListerForResourceFunc func(schema.GroupVersionResource) (cache.GenericLister, error) diff --git a/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go b/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go new file mode 100644 index 0000000000..3c2927d738 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go @@ -0,0 +1,293 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Equals returns true if the two lists are equivalent +func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { + if len(a) != len(b) { + return false + } + + for key, value1 := range a { + value2, found := b[key] + if !found { + return false + } + if value1.Cmp(value2) != 0 { + return false + } + } + + return true +} + +// LessThanOrEqual returns true if a < b for each key in b +// If false, it returns the keys in a that exceeded b +func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { + result := true + resourceNames := []corev1.ResourceName{} + for key, value := range b { + if other, found := a[key]; found { + if other.Cmp(value) > 0 { + result = false + resourceNames = append(resourceNames, key) + } + } + } + return result, resourceNames +} + +// Max returns the result of Max(a, b) for each named resource +func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + if other, found := b[key]; found { + if value.Cmp(other) <= 0 { + result[key] = other.DeepCopy() + continue + } + } + result[key] = value.DeepCopy() + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// Add returns the result of a + b for each named resource +func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Add(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// SubtractWithNonNegativeResult - subtracts and returns result of a - b but +// makes sure we don't return negative values to prevent negative resource usage. 
+func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + zero := resource.MustParse("0") + + result := corev1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Sub(other) + } + if quantity.Cmp(zero) > 0 { + result[key] = quantity + } else { + result[key] = zero + } + } + + for key := range b { + if _, found := result[key]; !found { + result[key] = zero + } + } + return result +} + +// Subtract returns the result of a - b for each named resource +func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Sub(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + quantity := value.DeepCopy() + quantity.Neg() + result[key] = quantity + } + } + return result +} + +// Mask returns a new resource list that only has the values with the specified names +func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList { + nameSet := ToSet(names) + result := corev1.ResourceList{} + for key, value := range resources { + if nameSet.Has(string(key)) { + result[key] = value.DeepCopy() + } + } + return result +} + +// ResourceNames returns a list of all resource names in the ResourceList +func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName { + result := []corev1.ResourceName{} + for resourceName := range resources { + result = append(result, resourceName) + } + return result +} + +// Contains returns true if the specified item is in the list of items +func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool { + for _, i := range items { + if i == item { + return true + } + } + return false +} + +// ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set +func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool { + for _, prefix := range prefixSet { + if strings.HasPrefix(string(item), prefix) { + return true + } + } + return false +} + +// Intersection returns the intersection of both list of resources, deduped and sorted +func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { + result := make([]corev1.ResourceName, 0, len(a)) + for _, item := range a { + if Contains(result, item) { + continue + } + if !Contains(b, item) { + continue + } + result = append(result, item) + } + sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) + return result +} + +// Difference returns the list of resources resulting from a-b, deduped and sorted +func Difference(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { + result := make([]corev1.ResourceName, 0, len(a)) + for _, item := range a { + if Contains(b, item) || Contains(result, item) { + continue + } + result = append(result, item) + } + sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) + return result +} + +// IsZero returns true if each key maps to the quantity value 0 +func IsZero(a corev1.ResourceList) bool { + zero := resource.MustParse("0") + for _, v := range a { + if v.Cmp(zero) != 0 { + return false + } + } + return true +} + +// IsNegative returns the set of resource names that have a negative value. 
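The arithmetic and set helpers above all operate on corev1.ResourceList values keyed by resource name. A minimal sketch of how they compose when checking a proposed usage against a hard limit; the quantities are illustrative only:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	quota "k8s.io/apiserver/pkg/quota/v1"
)

func main() {
	used := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("1Gi"),
	}
	request := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("250m"),
		corev1.ResourceMemory: resource.MustParse("512Mi"),
	}
	hard := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("1"),
		corev1.ResourceMemory: resource.MustParse("1Gi"),
	}

	// Usage if the request were admitted.
	proposed := quota.Add(used, request)

	// LessThanOrEqual reports, per named resource in hard, whether the
	// proposed usage stays within the limit, and returns the exceeded names.
	ok, exceeded := quota.LessThanOrEqual(proposed, hard)
	fmt.Println(ok, exceeded) // false [memory]  (1Gi + 512Mi > 1Gi)
}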
+func IsNegative(a corev1.ResourceList) []corev1.ResourceName { + results := []corev1.ResourceName{} + zero := resource.MustParse("0") + for k, v := range a { + if v.Cmp(zero) < 0 { + results = append(results, k) + } + } + return results +} + +// ToSet takes a list of resource names and converts to a string set +func ToSet(resourceNames []corev1.ResourceName) sets.String { + result := sets.NewString() + for _, resourceName := range resourceNames { + result.Insert(string(resourceName)) + } + return result +} + +// CalculateUsage calculates and returns the requested ResourceList usage. +// If an error is returned, usage only contains the resources which encountered no calculation errors. +func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) { + // find the intersection between the hard resources on the quota + // and the resources this controller can track to know what we can + // look to measure updated usage stats for + hardResources := ResourceNames(hardLimits) + potentialResources := []corev1.ResourceName{} + evaluators := registry.List() + for _, evaluator := range evaluators { + potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) + } + // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard + matchedResources := Intersection(hardResources, potentialResources) + + errors := []error{} + + // sum the observed usage from each evaluator + newUsage := corev1.ResourceList{} + for _, evaluator := range evaluators { + // only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything + intersection := evaluator.MatchingResources(matchedResources) + if len(intersection) == 0 { + continue + } + + usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection, ScopeSelector: scopeSelector} + stats, err := evaluator.UsageStats(usageStatsOptions) + if err != nil { + // remember the error + errors = append(errors, err) + // exclude resources which encountered calculation errors + matchedResources = Difference(matchedResources, intersection) + continue + } + newUsage = Add(newUsage, stats.Used) + } + + // mask the observed usage to only the set of resources tracked by this quota + // merge our observed usage with the quota usage status + // if the new usage is different than the last usage, we will need to do an update + newUsage = Mask(newUsage, matchedResources) + return newUsage, utilerrors.NewAggregate(errors) +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS b/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS new file mode 100644 index 0000000000..3ac0d161dc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- davidopp +- saad-ali +- janetkuo +- pwittrock +- ncdc +- piosz +- dims +- hongchaodeng +- krousey +- xiang90 +- resouer +- sdminonne +- enj diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/doc.go b/vendor/k8s.io/apiserver/pkg/registry/generic/doc.go new file mode 100644 index 0000000000..ea79d130a7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 
2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generic provides a generic object store interface and a +// generic label/field matching type. +package generic // import "k8s.io/apiserver/pkg/registry/generic" diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go b/vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go new file mode 100644 index 0000000000..4364374ef4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go @@ -0,0 +1,52 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" +) + +// ObjectMetaFieldsSet returns a fields that represent the ObjectMeta. +func ObjectMetaFieldsSet(objectMeta *metav1.ObjectMeta, hasNamespaceField bool) fields.Set { + if !hasNamespaceField { + return fields.Set{ + "metadata.name": objectMeta.Name, + } + } + return fields.Set{ + "metadata.name": objectMeta.Name, + "metadata.namespace": objectMeta.Namespace, + } +} + +// AdObjectMetaField add fields that represent the ObjectMeta to source. +func AddObjectMetaFieldsSet(source fields.Set, objectMeta *metav1.ObjectMeta, hasNamespaceField bool) fields.Set { + source["metadata.name"] = objectMeta.Name + if hasNamespaceField { + source["metadata.namespace"] = objectMeta.Namespace + } + return source +} + +// MergeFieldsSets merges a fields'set from fragment into the source. +func MergeFieldsSets(source fields.Set, fragment fields.Set) fields.Set { + for k, value := range fragment { + source[k] = value + } + return source +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/options.go b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go new file mode 100644 index 0000000000..577192b626 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/client-go/tools/cache" +) + +// RESTOptions is set of configuration options to generic registries. +type RESTOptions struct { + StorageConfig *storagebackend.Config + Decorator StorageDecorator + + EnableGarbageCollection bool + DeleteCollectionWorkers int + ResourcePrefix string + CountMetricPollPeriod time.Duration +} + +// Implement RESTOptionsGetter so that RESTOptions can directly be used when available (i.e. tests) +func (opts RESTOptions) GetRESTOptions(schema.GroupResource) (RESTOptions, error) { + return opts, nil +} + +type RESTOptionsGetter interface { + GetRESTOptions(resource schema.GroupResource) (RESTOptions, error) +} + +// StoreOptions is set of configuration options used to complete generic registries. +type StoreOptions struct { + RESTOptions RESTOptionsGetter + TriggerFunc storage.IndexerFuncs + AttrFunc storage.AttrFunc + Indexers *cache.Indexers +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go new file mode 100644 index 0000000000..005a376d40 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + "net/http" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" +) + +type decoratedWatcher struct { + w watch.Interface + decorator ObjectFunc + cancel context.CancelFunc + resultCh chan watch.Event +} + +func newDecoratedWatcher(w watch.Interface, decorator ObjectFunc) *decoratedWatcher { + ctx, cancel := context.WithCancel(context.Background()) + d := &decoratedWatcher{ + w: w, + decorator: decorator, + cancel: cancel, + resultCh: make(chan watch.Event), + } + go d.run(ctx) + return d +} + +func (d *decoratedWatcher) run(ctx context.Context) { + var recv, send watch.Event + var ok bool + for { + select { + case recv, ok = <-d.w.ResultChan(): + // The underlying channel may be closed after timeout. 
+ if !ok { + d.cancel() + return + } + switch recv.Type { + case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark: + err := d.decorator(recv.Object) + if err != nil { + send = makeStatusErrorEvent(err) + break + } + send = recv + case watch.Error: + send = recv + } + select { + case d.resultCh <- send: + if send.Type == watch.Error { + d.cancel() + } + case <-ctx.Done(): + } + case <-ctx.Done(): + d.w.Stop() + close(d.resultCh) + return + } + } +} + +func (d *decoratedWatcher) Stop() { + d.cancel() +} + +func (d *decoratedWatcher) ResultChan() <-chan watch.Event { + return d.resultCh +} + +func makeStatusErrorEvent(err error) watch.Event { + status := &metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonInternalError, + } + return watch.Event{ + Type: watch.Error, + Object: status, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go new file mode 100644 index 0000000000..bd315ae47b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package etcd has a generic implementation of a registry that +// stores things in etcd. +package registry // import "k8s.io/apiserver/pkg/registry/generic/registry" diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go new file mode 100644 index 0000000000..2f184c50e0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package registry + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" +) + +type DryRunnableStorage struct { + Storage storage.Interface + Codec runtime.Codec +} + +func (s *DryRunnableStorage) Versioner() storage.Versioner { + return s.Storage.Versioner() +} + +func (s *DryRunnableStorage) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64, dryRun bool) error { + if dryRun { + if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err == nil { + return storage.NewKeyExistsError(key, 0) + } + return s.copyInto(obj, out) + } + return s.Storage.Create(ctx, key, obj, out, ttl) +} + +func (s *DryRunnableStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, deleteValidation storage.ValidateObjectFunc, dryRun bool) error { + if dryRun { + if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err != nil { + return err + } + if err := preconditions.Check(key, out); err != nil { + return err + } + return deleteValidation(ctx, out) + } + return s.Storage.Delete(ctx, key, out, preconditions, deleteValidation) +} + +func (s *DryRunnableStorage) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.Storage.Watch(ctx, key, opts) +} + +func (s *DryRunnableStorage) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.Storage.WatchList(ctx, key, opts) +} + +func (s *DryRunnableStorage) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + return s.Storage.Get(ctx, key, opts, objPtr) +} + +func (s *DryRunnableStorage) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + return s.Storage.GetToList(ctx, key, opts, listObj) +} + +func (s *DryRunnableStorage) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + return s.Storage.List(ctx, key, opts, listObj) +} + +func (s *DryRunnableStorage) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, suggestion runtime.Object) error { + if dryRun { + err := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType) + if err != nil { + return err + } + err = preconditions.Check(key, ptrToType) + if err != nil { + return err + } + rev, err := s.Versioner().ObjectResourceVersion(ptrToType) + if err != nil { + return err + } + out, _, err := tryUpdate(ptrToType, storage.ResponseMeta{ResourceVersion: rev}) + if err != nil { + return err + } + return s.copyInto(out, ptrToType) + } + return s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, suggestion) +} + +func (s *DryRunnableStorage) Count(key string) (int64, error) { + return s.Storage.Count(key) +} + +func (s *DryRunnableStorage) copyInto(in, out runtime.Object) error { + var data []byte + + data, err := runtime.Encode(s.Codec, in) + if err != nil { + return err + } + _, _, err = s.Codec.Decode(data, nil, out) + if err != nil { + return err + } + return nil + +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go new file mode 100644 index 0000000000..8c31bf8195 --- /dev/null +++ 
b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "fmt" + "sync" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/storage" + cacherstorage "k8s.io/apiserver/pkg/storage/cacher" + "k8s.io/apiserver/pkg/storage/etcd3" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/client-go/tools/cache" +) + +// Creates a cacher based given storageConfig. +func StorageWithCacher() generic.StorageDecorator { + return func( + storageConfig *storagebackend.Config, + resourcePrefix string, + keyFunc func(obj runtime.Object) (string, error), + newFunc func() runtime.Object, + newListFunc func() runtime.Object, + getAttrsFunc storage.AttrFunc, + triggerFuncs storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { + + s, d, err := generic.NewRawStorage(storageConfig, newFunc) + if err != nil { + return s, d, err + } + if klog.V(5).Enabled() { + klog.Infof("Storage caching is enabled for %s", objectTypeToString(newFunc())) + } + + cacherConfig := cacherstorage.Config{ + Storage: s, + Versioner: etcd3.APIObjectVersioner{}, + ResourcePrefix: resourcePrefix, + KeyFunc: keyFunc, + NewFunc: newFunc, + NewListFunc: newListFunc, + GetAttrsFunc: getAttrsFunc, + IndexerFuncs: triggerFuncs, + Indexers: indexers, + Codec: storageConfig.Codec, + } + cacher, err := cacherstorage.NewCacherFromConfig(cacherConfig) + if err != nil { + return nil, func() {}, err + } + destroyFunc := func() { + cacher.Stop() + d() + } + + // TODO : Remove RegisterStorageCleanup below when PR + // https://github.com/kubernetes/kubernetes/pull/50690 + // merges as that shuts down storage properly + RegisterStorageCleanup(destroyFunc) + + return cacher, destroyFunc, nil + } +} + +func objectTypeToString(obj runtime.Object) string { + // special-case unstructured objects that tell us their apiVersion/kind + if u, isUnstructured := obj.(*unstructured.Unstructured); isUnstructured { + if apiVersion, kind := u.GetAPIVersion(), u.GetKind(); len(apiVersion) > 0 && len(kind) > 0 { + return fmt.Sprintf("apiVersion=%s, kind=%s", apiVersion, kind) + } + } + // otherwise just return the type + return fmt.Sprintf("%T", obj) +} + +// TODO : Remove all the code below when PR +// https://github.com/kubernetes/kubernetes/pull/50690 +// merges as that shuts down storage properly +// HACK ALERT : Track the destroy methods to call them +// from the test harness. TrackStorageCleanup will be called +// only from the test harness, so Register/Cleanup will be +// no-op at runtime. 
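The comment above describes a test-only hook: RegisterStorageCleanup is a no-op unless a harness has first called TrackStorageCleanup. A minimal sketch of that pattern, assuming a hypothetical test package that links in this registry package:

package registry_test

import (
	"os"
	"testing"

	registry "k8s.io/apiserver/pkg/registry/generic/registry"
)

func TestMain(m *testing.M) {
	// Start remembering every DestroyFunc registered by StorageWithCacher.
	registry.TrackStorageCleanup()
	code := m.Run()
	// Tear down all tracked storage once the tests have finished.
	registry.CleanupStorage()
	os.Exit(code)
}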
+ +var cleanupLock sync.Mutex +var cleanup []func() = nil + +func TrackStorageCleanup() { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup != nil { + panic("Conflicting storage tracking") + } + cleanup = make([]func(), 0) +} + +func RegisterStorageCleanup(fn func()) { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup == nil { + return + } + cleanup = append(cleanup, fn) +} + +func CleanupStorage() { + cleanupLock.Lock() + old := cleanup + cleanup = nil + cleanupLock.Unlock() + + for _, d := range old { + d() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go new file mode 100644 index 0000000000..7c2f4c390e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -0,0 +1,1412 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + "fmt" + "reflect" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/validation/path" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storage" + storeerr "k8s.io/apiserver/pkg/storage/errors" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/util/dryrun" + "k8s.io/client-go/tools/cache" + + "k8s.io/klog/v2" +) + +// ObjectFunc is a function to act on a given object. An error may be returned +// if the hook cannot be completed. An ObjectFunc may transform the provided +// object. +type ObjectFunc func(obj runtime.Object) error + +// GenericStore interface can be used for type assertions when we need to access the underlying strategies. +type GenericStore interface { + GetCreateStrategy() rest.RESTCreateStrategy + GetUpdateStrategy() rest.RESTUpdateStrategy + GetDeleteStrategy() rest.RESTDeleteStrategy + GetExportStrategy() rest.RESTExportStrategy +} + +// Store implements pkg/api/rest.StandardStorage. It's intended to be +// embeddable and allows the consumer to implement any non-generic functions +// that are required. This object is intended to be copyable so that it can be +// used in different ways but share the same underlying behavior. +// +// All fields are required unless specified. +// +// The intended use of this type is embedding within a Kind specific +// RESTStorage implementation. 
This type provides CRUD semantics on a Kubelike +// resource, handling details like conflict detection with ResourceVersion and +// semantics. The RESTCreateStrategy, RESTUpdateStrategy, and +// RESTDeleteStrategy are generic across all backends, and encapsulate logic +// specific to the API. +// +// TODO: make the default exposed methods exactly match a generic RESTStorage +type Store struct { + // NewFunc returns a new instance of the type this registry returns for a + // GET of a single object, e.g.: + // + // curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object + NewFunc func() runtime.Object + + // NewListFunc returns a new list of the type this registry; it is the + // type returned when the resource is listed, e.g.: + // + // curl GET /apis/group/version/namespaces/my-ns/myresource + NewListFunc func() runtime.Object + + // DefaultQualifiedResource is the pluralized name of the resource. + // This field is used if there is no request info present in the context. + // See qualifiedResourceFromContext for details. + DefaultQualifiedResource schema.GroupResource + + // KeyRootFunc returns the root etcd key for this resource; should not + // include trailing "/". This is used for operations that work on the + // entire collection (listing and watching). + // + // KeyRootFunc and KeyFunc must be supplied together or not at all. + KeyRootFunc func(ctx context.Context) string + + // KeyFunc returns the key for a specific object in the collection. + // KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace' + // can be gotten from ctx. + // + // KeyFunc and KeyRootFunc must be supplied together or not at all. + KeyFunc func(ctx context.Context, name string) (string, error) + + // ObjectNameFunc returns the name of an object or an error. + ObjectNameFunc func(obj runtime.Object) (string, error) + + // TTLFunc returns the TTL (time to live) that objects should be persisted + // with. The existing parameter is the current TTL or the default for this + // operation. The update parameter indicates whether this is an operation + // against an existing object. + // + // Objects that are persisted with a TTL are evicted once the TTL expires. + TTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error) + + // PredicateFunc returns a matcher corresponding to the provided labels + // and fields. The SelectionPredicate returned should return true if the + // object matches the given field and label selectors. + PredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate + + // EnableGarbageCollection affects the handling of Update and Delete + // requests. Enabling garbage collection allows finalizers to do work to + // finalize this object before the store deletes it. + // + // If any store has garbage collection enabled, it must also be enabled in + // the kube-controller-manager. + EnableGarbageCollection bool + + // DeleteCollectionWorkers is the maximum number of workers in a single + // DeleteCollection call. Delete requests for the items in a collection + // are issued in parallel. + DeleteCollectionWorkers int + + // Decorator is an optional exit hook on an object returned from the + // underlying storage. The returned object could be an individual object + // (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for + // integrations that are above storage and should only be used for + // specific cases where storage of the value is not appropriate, since + // they cannot be watched. 
+ Decorator ObjectFunc + // CreateStrategy implements resource-specific behavior during creation. + CreateStrategy rest.RESTCreateStrategy + // AfterCreate implements a further operation to run after a resource is + // created and before it is decorated, optional. + AfterCreate ObjectFunc + + // UpdateStrategy implements resource-specific behavior during updates. + UpdateStrategy rest.RESTUpdateStrategy + // AfterUpdate implements a further operation to run after a resource is + // updated and before it is decorated, optional. + AfterUpdate ObjectFunc + + // DeleteStrategy implements resource-specific behavior during deletion. + DeleteStrategy rest.RESTDeleteStrategy + // AfterDelete implements a further operation to run after a resource is + // deleted and before it is decorated, optional. + AfterDelete ObjectFunc + // ReturnDeletedObject determines whether the Store returns the object + // that was deleted. Otherwise, return a generic success status response. + ReturnDeletedObject bool + // ShouldDeleteDuringUpdate is an optional function to determine whether + // an update from existing to obj should result in a delete. + // If specified, this is checked in addition to standard finalizer, + // deletionTimestamp, and deletionGracePeriodSeconds checks. + ShouldDeleteDuringUpdate func(ctx context.Context, key string, obj, existing runtime.Object) bool + // ExportStrategy implements resource-specific behavior during export, + // optional. Exported objects are not decorated. + ExportStrategy rest.RESTExportStrategy + // TableConvertor is an optional interface for transforming items or lists + // of items into tabular output. If unset, the default will be used. + TableConvertor rest.TableConvertor + + // Storage is the interface for the underlying storage for the + // resource. It is wrapped into a "DryRunnableStorage" that will + // either pass-through or simply dry-run. + Storage DryRunnableStorage + // StorageVersioner outputs the an object will be + // converted to before persisted in etcd, given a list of possible + // kinds of the object. + // If the StorageVersioner is nil, apiserver will leave the + // storageVersionHash as empty in the discovery document. + StorageVersioner runtime.GroupVersioner + // Called to cleanup clients used by the underlying Storage; optional. + DestroyFunc func() +} + +// Note: the rest.StandardStorage interface aggregates the common REST verbs +var _ rest.StandardStorage = &Store{} +var _ rest.Exporter = &Store{} +var _ rest.TableConvertor = &Store{} +var _ GenericStore = &Store{} + +const ( + OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + resourceCountPollPeriodJitter = 1.2 +) + +// NamespaceKeyRootFunc is the default function for constructing storage paths +// to resource directories enforcing namespace rules. +func NamespaceKeyRootFunc(ctx context.Context, prefix string) string { + key := prefix + ns, ok := genericapirequest.NamespaceFrom(ctx) + if ok && len(ns) > 0 { + key = key + "/" + ns + } + return key +} + +// NamespaceKeyFunc is the default function for constructing storage paths to +// a resource relative to the given prefix enforcing namespace rules. If the +// context does not contain a namespace, it errors. 
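A minimal sketch of how the namespace-aware key helpers behave when a namespace is present on the request context; the "/registry/widgets" prefix, namespace, and object name are illustrative only:

package main

import (
	"context"
	"fmt"

	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	registry "k8s.io/apiserver/pkg/registry/generic/registry"
)

func main() {
	// Attach a namespace to the context the same way the request handling
	// chain does before a store is invoked.
	ctx := genericapirequest.WithNamespace(context.Background(), "my-ns")

	root := registry.NamespaceKeyRootFunc(ctx, "/registry/widgets")
	key, err := registry.NamespaceKeyFunc(ctx, "/registry/widgets", "widget-1")

	fmt.Println(root)     // /registry/widgets/my-ns
	fmt.Println(key, err) // /registry/widgets/my-ns/widget-1 <nil>
}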
+func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) { + key := NamespaceKeyRootFunc(ctx, prefix) + ns, ok := genericapirequest.NamespaceFrom(ctx) + if !ok || len(ns) == 0 { + return "", apierrors.NewBadRequest("Namespace parameter required.") + } + if len(name) == 0 { + return "", apierrors.NewBadRequest("Name parameter required.") + } + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key = key + "/" + name + return key, nil +} + +// NoNamespaceKeyFunc is the default function for constructing storage paths +// to a resource relative to the given prefix without a namespace. +func NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) { + if len(name) == 0 { + return "", apierrors.NewBadRequest("Name parameter required.") + } + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key := prefix + "/" + name + return key, nil +} + +// New implements RESTStorage.New. +func (e *Store) New() runtime.Object { + return e.NewFunc() +} + +// NewList implements rest.Lister. +func (e *Store) NewList() runtime.Object { + return e.NewListFunc() +} + +// NamespaceScoped indicates whether the resource is namespaced +func (e *Store) NamespaceScoped() bool { + if e.CreateStrategy != nil { + return e.CreateStrategy.NamespaceScoped() + } + if e.UpdateStrategy != nil { + return e.UpdateStrategy.NamespaceScoped() + } + + panic("programmer error: no CRUD for resource, you're crazy, override NamespaceScoped too") +} + +// GetCreateStrategy implements GenericStore. +func (e *Store) GetCreateStrategy() rest.RESTCreateStrategy { + return e.CreateStrategy +} + +// GetUpdateStrategy implements GenericStore. +func (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy { + return e.UpdateStrategy +} + +// GetDeleteStrategy implements GenericStore. +func (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy { + return e.DeleteStrategy +} + +// GetExportStrategy implements GenericStore. +func (e *Store) GetExportStrategy() rest.RESTExportStrategy { + return e.ExportStrategy +} + +// List returns a list of items matching labels and field according to the +// store's PredicateFunc. +func (e *Store) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { + label := labels.Everything() + if options != nil && options.LabelSelector != nil { + label = options.LabelSelector + } + field := fields.Everything() + if options != nil && options.FieldSelector != nil { + field = options.FieldSelector + } + out, err := e.ListPredicate(ctx, e.PredicateFunc(label, field), options) + if err != nil { + return nil, err + } + if e.Decorator != nil { + if err := e.Decorator(out); err != nil { + return nil, err + } + } + return out, nil +} + +// ListPredicate returns a list of all the items matching the given +// SelectionPredicate. +func (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) { + if options == nil { + // By default we should serve the request from etcd. 
+ options = &metainternalversion.ListOptions{ResourceVersion: ""} + } + p.Limit = options.Limit + p.Continue = options.Continue + list := e.NewListFunc() + qualifiedResource := e.qualifiedResourceFromContext(ctx) + storageOpts := storage.ListOptions{ResourceVersion: options.ResourceVersion, ResourceVersionMatch: options.ResourceVersionMatch, Predicate: p} + if name, ok := p.MatchesSingle(); ok { + if key, err := e.KeyFunc(ctx, name); err == nil { + err := e.Storage.GetToList(ctx, key, storageOpts, list) + return list, storeerr.InterpretListError(err, qualifiedResource) + } + // if we cannot extract a key based on the current context, the optimization is skipped + } + + err := e.Storage.List(ctx, e.KeyRootFunc(ctx), storageOpts, list) + return list, storeerr.InterpretListError(err, qualifiedResource) +} + +// Create inserts a new item according to the unique key from the object. +func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { + if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { + return nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if createValidation != nil { + if err := createValidation(ctx, obj.DeepCopyObject()); err != nil { + return nil, err + } + } + + name, err := e.ObjectNameFunc(obj) + if err != nil { + return nil, err + } + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, err + } + qualifiedResource := e.qualifiedResourceFromContext(ctx) + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return nil, err + } + out := e.NewFunc() + if err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil { + err = storeerr.InterpretCreateError(err, qualifiedResource, name) + err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj) + if !apierrors.IsAlreadyExists(err) { + return nil, err + } + if errGet := e.Storage.Get(ctx, key, storage.GetOptions{}, out); errGet != nil { + return nil, err + } + accessor, errGetAcc := meta.Accessor(out) + if errGetAcc != nil { + return nil, err + } + if accessor.GetDeletionTimestamp() != nil { + msg := &err.(*apierrors.StatusError).ErrStatus.Message + *msg = fmt.Sprintf("object is being deleted: %s", *msg) + } + return nil, err + } + if e.AfterCreate != nil { + if err := e.AfterCreate(out); err != nil { + return nil, err + } + } + if e.Decorator != nil { + if err := e.Decorator(out); err != nil { + return nil, err + } + } + return out, nil +} + +// ShouldDeleteDuringUpdate is the default function for +// checking if an object should be deleted during an update. 
+// It checks if the new object has no finalizers, +// the existing object's deletionTimestamp is set, and +// the existing object's deletionGracePeriodSeconds is 0 or nil +func ShouldDeleteDuringUpdate(ctx context.Context, key string, obj, existing runtime.Object) bool { + newMeta, err := meta.Accessor(obj) + if err != nil { + utilruntime.HandleError(err) + return false + } + oldMeta, err := meta.Accessor(existing) + if err != nil { + utilruntime.HandleError(err) + return false + } + if len(newMeta.GetFinalizers()) > 0 { + // don't delete with finalizers remaining in the new object + return false + } + if oldMeta.GetDeletionTimestamp() == nil { + // don't delete if the existing object hasn't had a delete request made + return false + } + // delete if the existing object has no grace period or a grace period of 0 + return oldMeta.GetDeletionGracePeriodSeconds() == nil || *oldMeta.GetDeletionGracePeriodSeconds() == 0 +} + +// deleteWithoutFinalizers handles deleting an object ignoring its finalizer list. +// Used for objects that are either been finalized or have never initialized. +func (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, dryRun bool) (runtime.Object, bool, error) { + out := e.NewFunc() + klog.V(6).Infof("going to delete %s from registry, triggered by update", name) + // Using the rest.ValidateAllObjectFunc because the request is an UPDATE request and has already passed the admission for the UPDATE verb. + if err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryRun); err != nil { + // Deletion is racy, i.e., there could be multiple update + // requests to remove all finalizers from the object, so we + // ignore the NotFound error. + if storage.IsNotFound(err) { + _, err := e.finalizeDelete(ctx, obj, true) + // clients are expecting an updated object if a PUT succeeded, + // but finalizeDelete returns a metav1.Status, so return + // the object in the request instead. + return obj, false, err + } + return nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name) + } + _, err := e.finalizeDelete(ctx, out, true) + // clients are expecting an updated object if a PUT succeeded, but + // finalizeDelete returns a metav1.Status, so return the object in + // the request instead. + return obj, false, err +} + +// Update performs an atomic update and set of the object. Returns the result of the update +// or an error. If the registry allows create-on-update, the create flow will be executed. +// A bool is returned along with the object and any errors, to indicate object creation. 
+func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, false, err + } + + var ( + creatingObj runtime.Object + creating = false + ) + + qualifiedResource := e.qualifiedResourceFromContext(ctx) + storagePreconditions := &storage.Preconditions{} + if preconditions := objInfo.Preconditions(); preconditions != nil { + storagePreconditions.UID = preconditions.UID + storagePreconditions.ResourceVersion = preconditions.ResourceVersion + } + + out := e.NewFunc() + // deleteObj is only used in case a deletion is carried out + var deleteObj runtime.Object + err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + existingResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(existing) + if err != nil { + return nil, nil, err + } + if existingResourceVersion == 0 { + if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate { + return nil, nil, apierrors.NewNotFound(qualifiedResource, name) + } + } + + // Given the existing object, get the new object + obj, err := objInfo.UpdatedObject(ctx, existing) + if err != nil { + return nil, nil, err + } + + // If AllowUnconditionalUpdate() is true and the object specified by + // the user does not have a resource version, then we populate it with + // the latest version. Else, we check that the version specified by + // the user matches the version of latest storage object. + newResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj) + if err != nil { + return nil, nil, err + } + doUnconditionalUpdate := newResourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate() + + if existingResourceVersion == 0 { + creating = true + creatingObj = obj + if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { + return nil, nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if createValidation != nil { + if err := createValidation(ctx, obj.DeepCopyObject()); err != nil { + return nil, nil, err + } + } + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return nil, nil, err + } + + return obj, &ttl, nil + } + + creating = false + creatingObj = nil + if doUnconditionalUpdate { + // Update the object's resource version to match the latest + // storage object's resource version. + err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion) + if err != nil { + return nil, nil, err + } + } else { + // Check if the object's resource version matches the latest + // resource version. + if newResourceVersion == 0 { + // TODO: The Invalid error should have a field for Resource. + // After that field is added, we should fill the Resource and + // leave the Kind field empty. See the discussion in #18526. 
+ qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource} + fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newResourceVersion, "must be specified for an update")} + return nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList) + } + if newResourceVersion != existingResourceVersion { + return nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg)) + } + } + if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil { + return nil, nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if updateValidation != nil { + if err := updateValidation(ctx, obj.DeepCopyObject(), existing.DeepCopyObject()); err != nil { + return nil, nil, err + } + } + // Check the default delete-during-update conditions, and store-specific conditions if provided + if ShouldDeleteDuringUpdate(ctx, key, obj, existing) && + (e.ShouldDeleteDuringUpdate == nil || e.ShouldDeleteDuringUpdate(ctx, key, obj, existing)) { + deleteObj = obj + return nil, nil, errEmptiedFinalizers + } + ttl, err := e.calculateTTL(obj, res.TTL, true) + if err != nil { + return nil, nil, err + } + if int64(ttl) != res.TTL { + return obj, &ttl, nil + } + return obj, nil, nil + }, dryrun.IsDryRun(options.DryRun), nil) + + if err != nil { + // delete the object + if err == errEmptiedFinalizers { + return e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions, dryrun.IsDryRun(options.DryRun)) + } + if creating { + err = storeerr.InterpretCreateError(err, qualifiedResource, name) + err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj) + } else { + err = storeerr.InterpretUpdateError(err, qualifiedResource, name) + } + return nil, false, err + } + + if creating { + if e.AfterCreate != nil { + if err := e.AfterCreate(out); err != nil { + return nil, false, err + } + } + } else { + if e.AfterUpdate != nil { + if err := e.AfterUpdate(out); err != nil { + return nil, false, err + } + } + } + if e.Decorator != nil { + if err := e.Decorator(out); err != nil { + return nil, false, err + } + } + return out, creating, nil +} + +// Get retrieves the item from storage. +func (e *Store) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + obj := e.NewFunc() + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, err + } + if err := e.Storage.Get(ctx, key, storage.GetOptions{ResourceVersion: options.ResourceVersion}, obj); err != nil { + return nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name) + } + if e.Decorator != nil { + if err := e.Decorator(obj); err != nil { + return nil, err + } + } + return obj, nil +} + +// qualifiedResourceFromContext attempts to retrieve a GroupResource from the context's request info. +// If the context has no request info, DefaultQualifiedResource is used. 
+func (e *Store) qualifiedResourceFromContext(ctx context.Context) schema.GroupResource {
+ if info, ok := genericapirequest.RequestInfoFrom(ctx); ok {
+ return schema.GroupResource{Group: info.APIGroup, Resource: info.Resource}
+ }
+ // some implementations access storage directly and thus the context has no RequestInfo
+ return e.DefaultQualifiedResource
+}
+
+var (
+ errAlreadyDeleting = fmt.Errorf("abort delete")
+ errDeleteNow = fmt.Errorf("delete now")
+ errEmptiedFinalizers = fmt.Errorf("emptied finalizers")
+)
+
+// shouldOrphanDependents returns true if the finalizer for orphaning should be set or
+// updated for FinalizerOrphanDependents. In the order of highest to lowest
+// priority, there are three factors that affect whether to add/remove the
+// FinalizerOrphanDependents: options, existing finalizers of the object,
+// and e.DeleteStrategy.DefaultGarbageCollectionPolicy.
+func shouldOrphanDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {
+ // Get default GC policy from this REST object type
+ gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy)
+ var defaultGCPolicy rest.GarbageCollectionPolicy
+ if ok {
+ defaultGCPolicy = gcStrategy.DefaultGarbageCollectionPolicy(ctx)
+ }
+
+ if defaultGCPolicy == rest.Unsupported {
+ // return false to indicate that we should NOT orphan
+ return false
+ }
+
+ // An explicit policy set at deletion time overrides everything
+ if options != nil && options.OrphanDependents != nil {
+ return *options.OrphanDependents
+ }
+ if options != nil && options.PropagationPolicy != nil {
+ switch *options.PropagationPolicy {
+ case metav1.DeletePropagationOrphan:
+ return true
+ case metav1.DeletePropagationBackground, metav1.DeletePropagationForeground:
+ return false
+ }
+ }
+
+ // If a finalizer is set in the object, it overrides the default;
+ // validation should make sure the two cases won't be true at the same time.
+ finalizers := accessor.GetFinalizers()
+ for _, f := range finalizers {
+ switch f {
+ case metav1.FinalizerOrphanDependents:
+ return true
+ case metav1.FinalizerDeleteDependents:
+ return false
+ }
+ }
+
+ // Get default orphan policy from this REST object type if it exists
+ return defaultGCPolicy == rest.OrphanDependents
+}
+
+// shouldDeleteDependents returns true if the finalizer for foreground deletion should be set or
+// updated for FinalizerDeleteDependents. In the order of highest to lowest
+// priority, there are three factors that affect whether to add/remove the
+// FinalizerDeleteDependents: options, existing finalizers of the object, and
+// e.DeleteStrategy.DefaultGarbageCollectionPolicy.
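Editorial aside: a minimal sketch of how the precedence described above is usually exercised from the caller's side. An explicit propagation policy in DeleteOptions outranks both the object's existing finalizers and the strategy's default GC policy. The helper below is illustrative only and is not part of the vendored code.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// foregroundDeleteOptions builds DeleteOptions for which shouldDeleteDependents
// returns true and shouldOrphanDependents returns false, regardless of the
// object's current finalizers or the strategy's default GC policy.
func foregroundDeleteOptions() *metav1.DeleteOptions {
	policy := metav1.DeletePropagationForeground
	return &metav1.DeleteOptions{PropagationPolicy: &policy}
}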
+func shouldDeleteDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { + // Get default GC policy from this REST object type + if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy(ctx) == rest.Unsupported { + // return false to indicate that we should NOT delete in foreground + return false + } + + // If an explicit policy was set at deletion time, that overrides both + if options != nil && options.OrphanDependents != nil { + return false + } + if options != nil && options.PropagationPolicy != nil { + switch *options.PropagationPolicy { + case metav1.DeletePropagationForeground: + return true + case metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan: + return false + } + } + + // If a finalizer is set in the object, it overrides the default + // validation has made sure the two cases won't be true at the same time. + finalizers := accessor.GetFinalizers() + for _, f := range finalizers { + switch f { + case metav1.FinalizerDeleteDependents: + return true + case metav1.FinalizerOrphanDependents: + return false + } + } + + return false +} + +// deletionFinalizersForGarbageCollection analyzes the object and delete options +// to determine whether the object is in need of finalization by the garbage +// collector. If so, returns the set of deletion finalizers to apply and a bool +// indicating whether the finalizer list has changed and is in need of updating. +// +// The finalizers returned are intended to be handled by the garbage collector. +// If garbage collection is disabled for the store, this function returns false +// to ensure finalizers aren't set which will never be cleared. +func deletionFinalizersForGarbageCollection(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) { + if !e.EnableGarbageCollection { + return false, []string{} + } + shouldOrphan := shouldOrphanDependents(ctx, e, accessor, options) + shouldDeleteDependentInForeground := shouldDeleteDependents(ctx, e, accessor, options) + newFinalizers := []string{} + + // first remove both finalizers, add them back if needed. + for _, f := range accessor.GetFinalizers() { + if f == metav1.FinalizerOrphanDependents || f == metav1.FinalizerDeleteDependents { + continue + } + newFinalizers = append(newFinalizers, f) + } + + if shouldOrphan { + newFinalizers = append(newFinalizers, metav1.FinalizerOrphanDependents) + } + if shouldDeleteDependentInForeground { + newFinalizers = append(newFinalizers, metav1.FinalizerDeleteDependents) + } + + oldFinalizerSet := sets.NewString(accessor.GetFinalizers()...) + newFinalizersSet := sets.NewString(newFinalizers...) + if oldFinalizerSet.Equal(newFinalizersSet) { + return false, accessor.GetFinalizers() + } + return true, newFinalizers +} + +// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the +// DeletionTimestamp to "now" if there is no existing deletionTimestamp or if the existing +// deletionTimestamp is further in future. Finalizers are watching for such updates and will +// finalize the object if their IDs are present in the object's Finalizers list. +func markAsDeleting(obj runtime.Object, now time.Time) (err error) { + objectMeta, kerr := meta.Accessor(obj) + if kerr != nil { + return kerr + } + // This handles Generation bump for resources that don't support graceful + // deletion. 
For resources that support graceful deletion, it is handled in
+ // pkg/api/rest/delete.go
+ if objectMeta.GetDeletionTimestamp() == nil && objectMeta.GetGeneration() > 0 {
+ objectMeta.SetGeneration(objectMeta.GetGeneration() + 1)
+ }
+ existingDeletionTimestamp := objectMeta.GetDeletionTimestamp()
+ if existingDeletionTimestamp == nil || existingDeletionTimestamp.After(now) {
+ metaNow := metav1.NewTime(now)
+ objectMeta.SetDeletionTimestamp(&metaNow)
+ }
+ var zero int64 = 0
+ objectMeta.SetDeletionGracePeriodSeconds(&zero)
+ return nil
+}
+
+// updateForGracefulDeletionAndFinalizers updates the given object for
+// graceful deletion and finalization by setting the deletion timestamp and
+// grace period seconds (graceful deletion) and updating the list of
+// finalizers (finalization); it returns:
+//
+// 1. an error
+// 2. a boolean indicating that the object was not found, but it should be
+// ignored
+// 3. a boolean indicating that the object's grace period is exhausted and it
+// should be deleted immediately
+// 4. a new output object with the state that was updated
+// 5. a copy of the last existing state of the object
+func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
+ lastGraceful := int64(0)
+ var pendingFinalizers bool
+ out = e.NewFunc()
+ err = e.Storage.GuaranteedUpdate(
+ ctx,
+ key,
+ out,
+ false, /* ignoreNotFound */
+ &preconditions,
+ storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
+ if err := deleteValidation(ctx, existing); err != nil {
+ return nil, err
+ }
+ graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
+ if err != nil {
+ return nil, err
+ }
+ if pendingGraceful {
+ return nil, errAlreadyDeleting
+ }
+
+ // Add/remove the orphan finalizer as the options dictate.
+ // Note that this occurs after checking pendingGraceful, so
+ // finalizers cannot be updated via DeleteOptions if deletion has
+ // started.
+ existingAccessor, err := meta.Accessor(existing)
+ if err != nil {
+ return nil, err
+ }
+ needsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(ctx, e, existingAccessor, options)
+ if needsUpdate {
+ existingAccessor.SetFinalizers(newFinalizers)
+ }
+
+ pendingFinalizers = len(existingAccessor.GetFinalizers()) != 0
+ if !graceful {
+ // set the DeleteGracePeriods to 0 if the object has pendingFinalizers but does not support graceful deletion
+ if pendingFinalizers {
+ klog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name)
+ err = markAsDeleting(existing, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ return existing, nil
+ }
+ return nil, errDeleteNow
+ }
+ lastGraceful = *options.GracePeriodSeconds
+ lastExisting = existing
+ return existing, nil
+ }),
+ dryrun.IsDryRun(options.DryRun),
+ nil,
+ )
+ switch err {
+ case nil:
+ // If there are pending finalizers, we never delete the object immediately.
+ if pendingFinalizers {
+ return nil, false, false, out, lastExisting
+ }
+ if lastGraceful > 0 {
+ return nil, false, false, out, lastExisting
+ }
+ // If we are here, the registry supports the grace period mechanism and
+ // we are intentionally deleting gracelessly. In this case, we may
+ // enter a race with other k8s components.
If other component wins + // the race, the object will not be found, and we should tolerate + // the NotFound error. See + // https://github.com/kubernetes/kubernetes/issues/19403 for + // details. + return nil, true, true, out, lastExisting + case errDeleteNow: + // we've updated the object to have a zero grace period, or it's already at 0, so + // we should fall through and truly delete the object. + return nil, false, true, out, lastExisting + case errAlreadyDeleting: + out, err = e.finalizeDelete(ctx, in, true) + return err, false, false, out, lastExisting + default: + return storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting + } +} + +// Delete removes the item from storage. +func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, false, err + } + obj := e.NewFunc() + qualifiedResource := e.qualifiedResourceFromContext(ctx) + if err = e.Storage.Get(ctx, key, storage.GetOptions{}, obj); err != nil { + return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name) + } + + // support older consumers of delete by treating "nil" as delete immediately + if options == nil { + options = metav1.NewDeleteOptions(0) + } + var preconditions storage.Preconditions + if options.Preconditions != nil { + preconditions.UID = options.Preconditions.UID + preconditions.ResourceVersion = options.Preconditions.ResourceVersion + } + graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options) + if err != nil { + return nil, false, err + } + // this means finalizers cannot be updated via DeleteOptions if a deletion is already pending + if pendingGraceful { + out, err := e.finalizeDelete(ctx, obj, false) + return out, false, err + } + // check if obj has pending finalizers + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, false, apierrors.NewInternalError(err) + } + pendingFinalizers := len(accessor.GetFinalizers()) != 0 + var ignoreNotFound bool + var deleteImmediately bool = true + var lastExisting, out runtime.Object + + // Handle combinations of graceful deletion and finalization by issuing + // the correct updates. + shouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(ctx, e, accessor, options) + // TODO: remove the check, because we support no-op updates now. + if graceful || pendingFinalizers || shouldUpdateFinalizers { + err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, deleteValidation, obj) + // Update the preconditions.ResourceVersion if set since we updated the object. + if err == nil && deleteImmediately && preconditions.ResourceVersion != nil { + accessor, err = meta.Accessor(out) + if err != nil { + return out, false, apierrors.NewInternalError(err) + } + resourceVersion := accessor.GetResourceVersion() + preconditions.ResourceVersion = &resourceVersion + } + } + + // !deleteImmediately covers all cases where err != nil. We keep both to be future-proof. + if !deleteImmediately || err != nil { + return out, false, err + } + + // Going further in this function is not useful when we are + // performing a dry-run request. 
Worse, it will actually + // override "out" with the version of the object in database + // that doesn't have the finalizer and deletiontimestamp set + // (because the update above was dry-run too). If we already + // have that version available, let's just return it now, + // otherwise, we can call dry-run delete that will get us the + // latest version of the object. + if dryrun.IsDryRun(options.DryRun) && out != nil { + return out, true, nil + } + + // delete immediately, or no graceful deletion supported + klog.V(6).Infof("going to delete %s from registry: ", name) + out = e.NewFunc() + if err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun)); err != nil { + // Please refer to the place where we set ignoreNotFound for the reason + // why we ignore the NotFound error . + if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil { + // The lastExisting object may not be the last state of the object + // before its deletion, but it's the best approximation. + out, err := e.finalizeDelete(ctx, lastExisting, true) + return out, true, err + } + return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name) + } + out, err = e.finalizeDelete(ctx, out, true) + return out, true, err +} + +// DeleteReturnsDeletedObject implements the rest.MayReturnFullObjectDeleter interface +func (e *Store) DeleteReturnsDeletedObject() bool { + return e.ReturnDeletedObject +} + +// DeleteCollection removes all items returned by List with a given ListOptions from storage. +// +// DeleteCollection is currently NOT atomic. It can happen that only subset of objects +// will be deleted from storage, and then an error will be returned. +// In case of success, the list of deleted objects will be returned. +// +// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we +// are removing all objects of a given type) with the current API (it's technically +// possibly with storage API, but watch is not delivered correctly then). +// It will be possible to fix it with v3 etcd API. +func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) { + if listOptions == nil { + listOptions = &metainternalversion.ListOptions{} + } else { + listOptions = listOptions.DeepCopy() + } + + listObj, err := e.List(ctx, listOptions) + if err != nil { + return nil, err + } + items, err := meta.ExtractList(listObj) + if err != nil { + return nil, err + } + if len(items) == 0 { + // Nothing to delete, return now + return listObj, nil + } + // Spawn a number of goroutines, so that we can issue requests to storage + // in parallel to speed up deletion. + // It is proportional to the number of items to delete, up to + // DeleteCollectionWorkers (it doesn't make much sense to spawn 16 + // workers to delete 10 items). 
+ workersNumber := e.DeleteCollectionWorkers + if workersNumber > len(items) { + workersNumber = len(items) + } + if workersNumber < 1 { + workersNumber = 1 + } + wg := sync.WaitGroup{} + toProcess := make(chan int, 2*workersNumber) + errs := make(chan error, workersNumber+1) + + go func() { + defer utilruntime.HandleCrash(func(panicReason interface{}) { + errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason) + }) + for i := 0; i < len(items); i++ { + toProcess <- i + } + close(toProcess) + }() + + wg.Add(workersNumber) + for i := 0; i < workersNumber; i++ { + go func() { + // panics don't cross goroutine boundaries + defer utilruntime.HandleCrash(func(panicReason interface{}) { + errs <- fmt.Errorf("DeleteCollection goroutine panicked: %v", panicReason) + }) + defer wg.Done() + + for index := range toProcess { + accessor, err := meta.Accessor(items[index]) + if err != nil { + errs <- err + return + } + if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) { + klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err) + errs <- err + return + } + } + }() + } + wg.Wait() + select { + case err := <-errs: + return nil, err + default: + return listObj, nil + } +} + +// finalizeDelete runs the Store's AfterDelete hook if runHooks is set and +// returns the decorated deleted object if appropriate. +func (e *Store) finalizeDelete(ctx context.Context, obj runtime.Object, runHooks bool) (runtime.Object, error) { + if runHooks && e.AfterDelete != nil { + if err := e.AfterDelete(obj); err != nil { + return nil, err + } + } + if e.ReturnDeletedObject { + if e.Decorator != nil { + if err := e.Decorator(obj); err != nil { + return nil, err + } + } + return obj, nil + } + // Return information about the deleted object, which enables clients to + // verify that the object was actually deleted and not waiting for finalizers. + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + qualifiedResource := e.qualifiedResourceFromContext(ctx) + details := &metav1.StatusDetails{ + Name: accessor.GetName(), + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, // Yes we set Kind field to resource. + UID: accessor.GetUID(), + } + status := &metav1.Status{Status: metav1.StatusSuccess, Details: details} + return status, nil +} + +// Watch makes a matcher for the given label and field, and calls +// WatchPredicate. If possible, you should customize PredicateFunc to produce +// a matcher that matches by key. SelectionPredicate does this for you +// automatically. +func (e *Store) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { + label := labels.Everything() + if options != nil && options.LabelSelector != nil { + label = options.LabelSelector + } + field := fields.Everything() + if options != nil && options.FieldSelector != nil { + field = options.FieldSelector + } + predicate := e.PredicateFunc(label, field) + + resourceVersion := "" + if options != nil { + resourceVersion = options.ResourceVersion + predicate.AllowWatchBookmarks = options.AllowWatchBookmarks + } + return e.WatchPredicate(ctx, predicate, resourceVersion) +} + +// WatchPredicate starts a watch for the items that matches. 
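Editorial aside: the Watch method above only assembles a SelectionPredicate from the caller's label and field selectors before handing off to WatchPredicate. A hedged caller-side sketch follows; the narrow `watcher` interface and the "app=demo" selector are assumptions for illustration, not part of the vendored code.

package example

import (
	"context"

	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
)

// watcher captures only the Watch method of the generic registry Store.
type watcher interface {
	Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error)
}

// watchByLabel starts a watch filtered by a label selector; the store wraps the
// selector into a SelectionPredicate via its PredicateFunc before WatchPredicate runs.
func watchByLabel(ctx context.Context, s watcher, rv string) (watch.Interface, error) {
	return s.Watch(ctx, &metainternalversion.ListOptions{
		LabelSelector:   labels.SelectorFromSet(labels.Set{"app": "demo"}),
		ResourceVersion: rv,
	})
}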
+func (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {
+ storageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p}
+ if name, ok := p.MatchesSingle(); ok {
+ if key, err := e.KeyFunc(ctx, name); err == nil {
+ w, err := e.Storage.Watch(ctx, key, storageOpts)
+ if err != nil {
+ return nil, err
+ }
+ if e.Decorator != nil {
+ return newDecoratedWatcher(w, e.Decorator), nil
+ }
+ return w, nil
+ }
+ // if we cannot extract a key based on the current context, the
+ // optimization is skipped
+ }
+
+ w, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), storageOpts)
+ if err != nil {
+ return nil, err
+ }
+ if e.Decorator != nil {
+ return newDecoratedWatcher(w, e.Decorator), nil
+ }
+ return w, nil
+}
+
+// calculateTTL is a helper for retrieving the updated TTL for an object or
+// returning an error if the TTL cannot be calculated. The defaultTTL is
+// changed to 1 if less than zero. Zero means no TTL, not immediate expiration.
+func (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {
+ // TODO: validate this assertion is still valid.
+
+ // etcd may return a negative TTL for a node if the expiration has not
+ // occurred due to server lag - we will ensure that the value is at least
+ // set.
+ if defaultTTL < 0 {
+ defaultTTL = 1
+ }
+ ttl = uint64(defaultTTL)
+ if e.TTLFunc != nil {
+ ttl, err = e.TTLFunc(obj, ttl, update)
+ }
+ return ttl, err
+}
+
+// exportObjectMeta unsets the fields on the given object that should not be
+// present when the object is exported.
+func exportObjectMeta(accessor metav1.Object, exact bool) {
+ accessor.SetUID("")
+ if !exact {
+ accessor.SetNamespace("")
+ }
+ accessor.SetCreationTimestamp(metav1.Time{})
+ accessor.SetDeletionTimestamp(nil)
+ accessor.SetResourceVersion("")
+ accessor.SetSelfLink("")
+ if len(accessor.GetGenerateName()) > 0 && !exact {
+ accessor.SetName("")
+ }
+}
+
+// Export implements the rest.Exporter interface
+func (e *Store) Export(ctx context.Context, name string, opts metav1.ExportOptions) (runtime.Object, error) {
+ obj, err := e.Get(ctx, name, &metav1.GetOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if accessor, err := meta.Accessor(obj); err == nil {
+ exportObjectMeta(accessor, opts.Exact)
+ } else {
+ klog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err)
+ }
+
+ if e.ExportStrategy != nil {
+ if err = e.ExportStrategy.Export(ctx, obj, opts.Exact); err != nil {
+ return nil, err
+ }
+ } else {
+ e.CreateStrategy.PrepareForCreate(ctx, obj)
+ }
+ return obj, nil
+}
+
+// CompleteWithOptions updates the store with the provided options and
+// defaults common fields.
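Editorial aside: calculateTTL above delegates to the store's optional TTLFunc hook. A minimal sketch of such a hook follows; the annotation name and the helper are assumptions for illustration only and do not appear in this patch.

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
)

// exampleTTLFunc has the shape consumed by calculateTTL: it receives the
// object, the default TTL already clamped by the store, and whether this is an
// update, and returns the TTL (in seconds) to persist with the object.
func exampleTTLFunc(obj runtime.Object, existing uint64, update bool) (uint64, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return existing, err
	}
	// Allow a (hypothetical) annotation to request a longer TTL.
	if accessor.GetAnnotations()["example.io/ttl-seconds"] == "3600" {
		return 3600, nil
	}
	return existing, nil
}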
+func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { + if e.DefaultQualifiedResource.Empty() { + return fmt.Errorf("store %#v must have a non-empty qualified resource", e) + } + if e.NewFunc == nil { + return fmt.Errorf("store for %s must have NewFunc set", e.DefaultQualifiedResource.String()) + } + if e.NewListFunc == nil { + return fmt.Errorf("store for %s must have NewListFunc set", e.DefaultQualifiedResource.String()) + } + if (e.KeyRootFunc == nil) != (e.KeyFunc == nil) { + return fmt.Errorf("store for %s must set both KeyRootFunc and KeyFunc or neither", e.DefaultQualifiedResource.String()) + } + + if e.TableConvertor == nil { + return fmt.Errorf("store for %s must set TableConvertor; rest.NewDefaultTableConvertor(e.DefaultQualifiedResource) can be used to output just name/creation time", e.DefaultQualifiedResource.String()) + } + + var isNamespaced bool + switch { + case e.CreateStrategy != nil: + isNamespaced = e.CreateStrategy.NamespaceScoped() + case e.UpdateStrategy != nil: + isNamespaced = e.UpdateStrategy.NamespaceScoped() + default: + return fmt.Errorf("store for %s must have CreateStrategy or UpdateStrategy set", e.DefaultQualifiedResource.String()) + } + + if e.DeleteStrategy == nil { + return fmt.Errorf("store for %s must have DeleteStrategy set", e.DefaultQualifiedResource.String()) + } + + if options.RESTOptions == nil { + return fmt.Errorf("options for %s must have RESTOptions set", e.DefaultQualifiedResource.String()) + } + + attrFunc := options.AttrFunc + if attrFunc == nil { + if isNamespaced { + attrFunc = storage.DefaultNamespaceScopedAttr + } else { + attrFunc = storage.DefaultClusterScopedAttr + } + } + if e.PredicateFunc == nil { + e.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate { + return storage.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: attrFunc, + } + } + } + + err := validateIndexers(options.Indexers) + if err != nil { + return err + } + + opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource) + if err != nil { + return err + } + + // ResourcePrefix must come from the underlying factory + prefix := opts.ResourcePrefix + if !strings.HasPrefix(prefix, "/") { + prefix = "/" + prefix + } + if prefix == "/" { + return fmt.Errorf("store for %s has an invalid prefix %q", e.DefaultQualifiedResource.String(), opts.ResourcePrefix) + } + + // Set the default behavior for storage key generation + if e.KeyRootFunc == nil && e.KeyFunc == nil { + if isNamespaced { + e.KeyRootFunc = func(ctx context.Context) string { + return NamespaceKeyRootFunc(ctx, prefix) + } + e.KeyFunc = func(ctx context.Context, name string) (string, error) { + return NamespaceKeyFunc(ctx, prefix, name) + } + } else { + e.KeyRootFunc = func(ctx context.Context) string { + return prefix + } + e.KeyFunc = func(ctx context.Context, name string) (string, error) { + return NoNamespaceKeyFunc(ctx, prefix, name) + } + } + } + + // We adapt the store's keyFunc so that we can use it with the StorageDecorator + // without making any assumptions about where objects are stored in etcd + keyFunc := func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + + if isNamespaced { + return e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName()) + } + + return e.KeyFunc(genericapirequest.NewContext(), accessor.GetName()) + } + + if e.DeleteCollectionWorkers == 0 { + 
e.DeleteCollectionWorkers = opts.DeleteCollectionWorkers + } + + e.EnableGarbageCollection = opts.EnableGarbageCollection + + if e.ObjectNameFunc == nil { + e.ObjectNameFunc = func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetName(), nil + } + } + + if e.Storage.Storage == nil { + e.Storage.Codec = opts.StorageConfig.Codec + var err error + e.Storage.Storage, e.DestroyFunc, err = opts.Decorator( + opts.StorageConfig, + prefix, + keyFunc, + e.NewFunc, + e.NewListFunc, + attrFunc, + options.TriggerFunc, + options.Indexers, + ) + if err != nil { + return err + } + e.StorageVersioner = opts.StorageConfig.EncodeVersioner + + if opts.CountMetricPollPeriod > 0 { + stopFunc := e.startObservingCount(opts.CountMetricPollPeriod) + previousDestroy := e.DestroyFunc + e.DestroyFunc = func() { + stopFunc() + if previousDestroy != nil { + previousDestroy() + } + } + } + } + + return nil +} + +// startObservingCount starts monitoring given prefix and periodically updating metrics. It returns a function to stop collection. +func (e *Store) startObservingCount(period time.Duration) func() { + prefix := e.KeyRootFunc(genericapirequest.NewContext()) + resourceName := e.DefaultQualifiedResource.String() + klog.V(2).Infof("Monitoring %v count at /%v", resourceName, prefix) + stopCh := make(chan struct{}) + go wait.JitterUntil(func() { + count, err := e.Storage.Count(prefix) + if err != nil { + klog.V(5).Infof("Failed to update storage count metric: %v", err) + metrics.UpdateObjectCount(resourceName, -1) + } else { + metrics.UpdateObjectCount(resourceName, count) + } + }, period, resourceCountPollPeriodJitter, true, stopCh) + return func() { close(stopCh) } +} + +func (e *Store) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { + if e.TableConvertor != nil { + return e.TableConvertor.ConvertToTable(ctx, object, tableOptions) + } + return rest.NewDefaultTableConvertor(e.DefaultQualifiedResource).ConvertToTable(ctx, object, tableOptions) +} + +func (e *Store) StorageVersion() runtime.GroupVersioner { + return e.StorageVersioner +} + +// validateIndexers will check the prefix of indexers. +func validateIndexers(indexers *cache.Indexers) error { + if indexers == nil { + return nil + } + for indexName := range *indexers { + if len(indexName) <= 2 || (indexName[:2] != "l:" && indexName[:2] != "f:") { + return fmt.Errorf("index must prefix with \"l:\" or \"f:\"") + } + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go new file mode 100644 index 0000000000..e0ca2df04c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/client-go/tools/cache" +) + +// StorageDecorator is a function signature for producing a storage.Interface +// and an associated DestroyFunc from given parameters. +type StorageDecorator func( + config *storagebackend.Config, + resourcePrefix string, + keyFunc func(obj runtime.Object) (string, error), + newFunc func() runtime.Object, + newListFunc func() runtime.Object, + getAttrsFunc storage.AttrFunc, + trigger storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) + +// UndecoratedStorage returns the given a new storage from the given config +// without any decoration. +func UndecoratedStorage( + config *storagebackend.Config, + resourcePrefix string, + keyFunc func(obj runtime.Object) (string, error), + newFunc func() runtime.Object, + newListFunc func() runtime.Object, + getAttrsFunc storage.AttrFunc, + trigger storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { + return NewRawStorage(config, newFunc) +} + +// NewRawStorage creates the low level kv storage. This is a work-around for current +// two layer of same storage interface. +// TODO: Once cacher is enabled on all registries (event registry is special), we will remove this method. +func NewRawStorage(config *storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) { + return factory.Create(*config, newFunc) +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS new file mode 100644 index 0000000000..55e6178184 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS @@ -0,0 +1,23 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- justinsb +- ncdc +- dims +- hongchaodeng +- krousey +- ingvagabund +- jianhuiz +- sdminonne +- enj diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/create.go b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go new file mode 100644 index 0000000000..dd70a4eb78 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go @@ -0,0 +1,192 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package rest
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ genericvalidation "k8s.io/apimachinery/pkg/api/validation"
+ "k8s.io/apimachinery/pkg/api/validation/path"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/apiserver/pkg/admission"
+ "k8s.io/apiserver/pkg/features"
+ "k8s.io/apiserver/pkg/storage/names"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+)
+
+// RESTCreateStrategy defines the minimum validation, accepted input, and
+// name generation behavior to create an object that follows Kubernetes
+// API conventions.
+type RESTCreateStrategy interface {
+ runtime.ObjectTyper
+ // The name generator is used when the standard GenerateName field is set.
+ // The NameGenerator will be invoked prior to validation.
+ names.NameGenerator
+
+ // NamespaceScoped returns true if the object must be within a namespace.
+ NamespaceScoped() bool
+ // PrepareForCreate is invoked on create before validation to normalize
+ // the object. For example: remove fields that are not to be persisted,
+ // sort order-insensitive list fields, etc. This should not remove fields
+ // whose presence would be considered a validation error.
+ //
+ // Often implemented as a type check and an initialization or clearing of
+ // status. Clear the status because status changes are internal. External
+ // callers of an API (users) should not be setting an initial status on
+ // newly created objects.
+ PrepareForCreate(ctx context.Context, obj runtime.Object)
+ // Validate returns an ErrorList with validation errors or nil. Validate
+ // is invoked after default fields in the object have been filled in and
+ // before the object is persisted. This method should not mutate the
+ // object.
+ Validate(ctx context.Context, obj runtime.Object) field.ErrorList
+ // Canonicalize allows an object to be mutated into a canonical form. This
+ // ensures that code that operates on these objects can rely on the common
+ // form for things like comparison. Canonicalize is invoked after
+ // validation has succeeded but before the object has been persisted.
+ // This method may mutate the object. Often implemented as a type check or
+ // empty method.
+ Canonicalize(obj runtime.Object)
+}
+
+// BeforeCreate ensures that common operations for all resources are performed on creation. It only returns
+// errors that can be converted to api.Status. It invokes PrepareForCreate, then GenerateName, then Validate.
+// It returns nil if the object should be created.
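Editorial aside: for orientation, a minimal and entirely hypothetical RESTCreateStrategy implementation for an imaginary Widget resource. Real strategies obtain the ObjectTyper from their API group's scheme, usually use names.SimpleNameGenerator, and put real defaulting and validation into PrepareForCreate and Validate.

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/storage/names"
)

// widgetStrategy satisfies RESTCreateStrategy; the embedded interfaces supply
// ObjectKinds and GenerateName, so only the four methods below remain.
type widgetStrategy struct {
	runtime.ObjectTyper
	names.NameGenerator
}

func (widgetStrategy) NamespaceScoped() bool { return true }

// PrepareForCreate would clear status and other system-managed fields.
func (widgetStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {}

// Validate would check spec fields; returning nil accepts the object.
func (widgetStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { return nil }

func (widgetStrategy) Canonicalize(obj runtime.Object) {}

// A value is typically built as widgetStrategy{someScheme, names.SimpleNameGenerator},
// where someScheme is a placeholder for the API group's runtime.Scheme.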
+func BeforeCreate(strategy RESTCreateStrategy, ctx context.Context, obj runtime.Object) error { + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + + if strategy.NamespaceScoped() { + if !ValidNamespace(ctx, objectMeta) { + return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") + } + } else if len(objectMeta.GetNamespace()) > 0 { + objectMeta.SetNamespace(metav1.NamespaceNone) + } + objectMeta.SetDeletionTimestamp(nil) + objectMeta.SetDeletionGracePeriodSeconds(nil) + strategy.PrepareForCreate(ctx, obj) + FillObjectMetaSystemFields(objectMeta) + if len(objectMeta.GetGenerateName()) > 0 && len(objectMeta.GetName()) == 0 { + objectMeta.SetName(strategy.GenerateName(objectMeta.GetGenerateName())) + } + + // Ensure managedFields is not set unless the feature is enabled + if !utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + objectMeta.SetManagedFields(nil) + } + + // ClusterName is ignored and should not be saved + if len(objectMeta.GetClusterName()) > 0 { + objectMeta.SetClusterName("") + } + + if errs := strategy.Validate(ctx, obj); len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.GetName(), errs) + } + + // Custom validation (including name validation) passed + // Now run common validation on object meta + // Do this *after* custom validation so that specific error messages are shown whenever possible + if errs := genericvalidation.ValidateObjectMetaAccessor(objectMeta, strategy.NamespaceScoped(), path.ValidatePathSegmentName, field.NewPath("metadata")); len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.GetName(), errs) + } + + strategy.Canonicalize(obj) + + return nil +} + +// CheckGeneratedNameError checks whether an error that occurred creating a resource is due +// to generation being unable to pick a valid name. +func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error { + if !errors.IsAlreadyExists(err) { + return err + } + + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + + if len(objectMeta.GetGenerateName()) == 0 { + return err + } + + return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0) +} + +// objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error. +func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (metav1.Object, schema.GroupVersionKind, error) { + objectMeta, err := meta.Accessor(obj) + if err != nil { + return nil, schema.GroupVersionKind{}, errors.NewInternalError(err) + } + kinds, _, err := typer.ObjectKinds(obj) + if err != nil { + return nil, schema.GroupVersionKind{}, errors.NewInternalError(err) + } + return objectMeta, kinds[0], nil +} + +// NamespaceScopedStrategy has a method to tell if the object must be in a namespace. +type NamespaceScopedStrategy interface { + // NamespaceScoped returns if the object must be in a namespace. 
+ NamespaceScoped() bool +} + +// AdmissionToValidateObjectFunc converts validating admission to a rest validate object func +func AdmissionToValidateObjectFunc(admit admission.Interface, staticAttributes admission.Attributes, o admission.ObjectInterfaces) ValidateObjectFunc { + validatingAdmission, ok := admit.(admission.ValidationInterface) + if !ok { + return func(ctx context.Context, obj runtime.Object) error { return nil } + } + return func(ctx context.Context, obj runtime.Object) error { + name := staticAttributes.GetName() + // in case the generated name is populated + if len(name) == 0 { + if metadata, err := meta.Accessor(obj); err == nil { + name = metadata.GetName() + } + } + + finalAttributes := admission.NewAttributesRecord( + obj, + staticAttributes.GetOldObject(), + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + name, + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), + staticAttributes.IsDryRun(), + staticAttributes.GetUserInfo(), + ) + if !validatingAdmission.Handles(finalAttributes.GetOperation()) { + return nil + } + return validatingAdmission.Validate(ctx, finalAttributes, o) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go b/vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go new file mode 100644 index 0000000000..37d6c8f8ad --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// RESTCreateUpdateStrategy is a union of RESTUpdateStrategy and RESTCreateStrategy, +// and it defines the minimum validation, accepted input, and name generation +// behavior to create and update an object that follows Kubernetes API conventions. +type RESTCreateUpdateStrategy interface { + RESTCreateStrategy + // AllowCreateOnUpdate returns true if the object can be created by a PUT. + AllowCreateOnUpdate() bool + // PrepareForUpdate is invoked on update before validation to normalize + // the object. For example: remove fields that are not to be persisted, + // sort order-insensitive list fields, etc. This should not remove fields + // whose presence would be considered a validation error. + PrepareForUpdate(ctx context.Context, obj, old runtime.Object) + // ValidateUpdate is invoked after default fields in the object have been + // filled in before the object is persisted. This method should not mutate + // the object. + ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList + // AllowUnconditionalUpdate returns true if the object can be updated + // unconditionally (irrespective of the latest resource version), when + // there is no resource version specified in the object. 
+ AllowUnconditionalUpdate() bool +} + +// Ensure that RESTCreateUpdateStrategy extends RESTCreateStrategy +var _ RESTCreateStrategy = (RESTCreateUpdateStrategy)(nil) + +// Ensure that RESTCreateUpdateStrategy extends RESTUpdateStrategy +var _ RESTUpdateStrategy = (RESTCreateUpdateStrategy)(nil) diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go b/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go new file mode 100644 index 0000000000..3e7ca85b76 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go @@ -0,0 +1,183 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +// RESTDeleteStrategy defines deletion behavior on an object that follows Kubernetes +// API conventions. +type RESTDeleteStrategy interface { + runtime.ObjectTyper +} + +type GarbageCollectionPolicy string + +const ( + DeleteDependents GarbageCollectionPolicy = "DeleteDependents" + OrphanDependents GarbageCollectionPolicy = "OrphanDependents" + // Unsupported means that the resource knows that it cannot be GC'd, so the finalizers + // should never be set in storage. + Unsupported GarbageCollectionPolicy = "Unsupported" +) + +// GarbageCollectionDeleteStrategy must be implemented by the registry that wants to +// orphan dependents by default. +type GarbageCollectionDeleteStrategy interface { + // DefaultGarbageCollectionPolicy returns the default garbage collection behavior. + DefaultGarbageCollectionPolicy(ctx context.Context) GarbageCollectionPolicy +} + +// RESTGracefulDeleteStrategy must be implemented by the registry that supports +// graceful deletion. +type RESTGracefulDeleteStrategy interface { + // CheckGracefulDelete should return true if the object can be gracefully deleted and set + // any default values on the DeleteOptions. + CheckGracefulDelete(ctx context.Context, obj runtime.Object, options *metav1.DeleteOptions) bool +} + +// BeforeDelete tests whether the object can be gracefully deleted. +// If graceful is set, the object should be gracefully deleted. If gracefulPending +// is set, the object has already been gracefully deleted (and the provided grace +// period is longer than the time to deletion). An error is returned if the +// condition cannot be checked or the gracePeriodSeconds is invalid. The options +// argument may be updated with default values if graceful is true. Second place +// where we set deletionTimestamp is pkg/registry/generic/registry/store.go. +// This function is responsible for setting deletionTimestamp during gracefulDeletion, +// other one for cascading deletions. 
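Editorial aside: BeforeDelete, defined next, consults the two optional strategy interfaces introduced above. A hedged sketch of a strategy implementing both follows; the type name and the 30-second default are illustrative assumptions, not part of the vendored code.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/rest"
)

// widgetDeleteStrategy is a hypothetical delete strategy. Returning
// DeleteDependents (rather than Unsupported) keeps garbage-collection
// finalizers enabled for the resource.
type widgetDeleteStrategy struct {
	runtime.ObjectTyper
}

func (widgetDeleteStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy {
	return rest.DeleteDependents
}

// CheckGracefulDelete defaults a 30-second grace period when the caller did
// not request one, which makes BeforeDelete treat the deletion as graceful.
func (widgetDeleteStrategy) CheckGracefulDelete(ctx context.Context, obj runtime.Object, options *metav1.DeleteOptions) bool {
	if options != nil && options.GracePeriodSeconds == nil {
		period := int64(30)
		options.GracePeriodSeconds = &period
	}
	return true
}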
+func BeforeDelete(strategy RESTDeleteStrategy, ctx context.Context, obj runtime.Object, options *metav1.DeleteOptions) (graceful, gracefulPending bool, err error) {
+ objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj)
+ if kerr != nil {
+ return false, false, kerr
+ }
+ if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
+ return false, false, errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
+ }
+ // Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too.
+ if options.Preconditions != nil {
+ if options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.GetUID() {
+ return false, false, errors.NewConflict(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.GetName(), fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.GetUID()))
+ }
+ if options.Preconditions.ResourceVersion != nil && *options.Preconditions.ResourceVersion != objectMeta.GetResourceVersion() {
+ return false, false, errors.NewConflict(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.GetName(), fmt.Errorf("the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). The object might have been modified", *options.Preconditions.ResourceVersion, objectMeta.GetResourceVersion()))
+ }
+ }
+ gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy)
+ if !ok {
+ // If we're not deleting gracefully there's no point in updating Generation, as we won't update
+ // the object before deleting it.
+ return false, false, nil
+ }
+ // if the object is already being deleted, no need to update generation.
+ if objectMeta.GetDeletionTimestamp() != nil {
+ // if we are already being deleted, we may only shorten the deletion grace period
+ // this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
+ // so we force deletion immediately
+ // IMPORTANT:
+ // The deletion operation happens in two phases.
+ // 1. Update to set DeletionGracePeriodSeconds and DeletionTimestamp
+ // 2. Delete the object from storage.
+ // If the update succeeds, but the delete fails (network error, internal storage error, etc.),
+ // a resource was previously left in a state that was non-recoverable. We
+ // check if the existing stored resource has a grace period of 0 and if so
+ // attempt to delete immediately in order to recover from this scenario.
+ if objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() == 0 {
+ return false, false, nil
+ }
+ // only a shorter grace period may be provided by a user
+ if options.GracePeriodSeconds != nil {
+ period := int64(*options.GracePeriodSeconds)
+ if period >= *objectMeta.GetDeletionGracePeriodSeconds() {
+ return false, true, nil
+ }
+ newDeletionTimestamp := metav1.NewTime(
+ objectMeta.GetDeletionTimestamp().Add(-time.Second * time.Duration(*objectMeta.GetDeletionGracePeriodSeconds())).
+ Add(time.Second * time.Duration(*options.GracePeriodSeconds))) + objectMeta.SetDeletionTimestamp(&newDeletionTimestamp) + objectMeta.SetDeletionGracePeriodSeconds(&period) + return true, false, nil + } + // graceful deletion is pending, do nothing + options.GracePeriodSeconds = objectMeta.GetDeletionGracePeriodSeconds() + return false, true, nil + } + + if !gracefulStrategy.CheckGracefulDelete(ctx, obj, options) { + return false, false, nil + } + now := metav1.NewTime(metav1.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) + objectMeta.SetDeletionTimestamp(&now) + objectMeta.SetDeletionGracePeriodSeconds(options.GracePeriodSeconds) + // If it's the first graceful deletion we are going to set the DeletionTimestamp to non-nil. + // Controllers of the object that's being deleted shouldn't take any nontrivial actions, hence its behavior changes. + // Thus we need to bump object's Generation (if set). This handles generation bump during graceful deletion. + // The bump for objects that don't support graceful deletion is handled in pkg/registry/generic/registry/store.go. + if objectMeta.GetGeneration() > 0 { + objectMeta.SetGeneration(objectMeta.GetGeneration() + 1) + } + return true, false, nil +} + +// AdmissionToValidateObjectDeleteFunc returns a admission validate func for object deletion +func AdmissionToValidateObjectDeleteFunc(admit admission.Interface, staticAttributes admission.Attributes, objInterfaces admission.ObjectInterfaces) ValidateObjectFunc { + mutatingAdmission, isMutatingAdmission := admit.(admission.MutationInterface) + validatingAdmission, isValidatingAdmission := admit.(admission.ValidationInterface) + + mutating := isMutatingAdmission && mutatingAdmission.Handles(staticAttributes.GetOperation()) + validating := isValidatingAdmission && validatingAdmission.Handles(staticAttributes.GetOperation()) + + return func(ctx context.Context, old runtime.Object) error { + if !mutating && !validating { + return nil + } + finalAttributes := admission.NewAttributesRecord( + nil, + // Deep copy the object to avoid accidentally changing the object. + old.DeepCopyObject(), + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + staticAttributes.GetName(), + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), + staticAttributes.IsDryRun(), + staticAttributes.GetUserInfo(), + ) + if mutating { + if err := mutatingAdmission.Admit(ctx, finalAttributes, objInterfaces); err != nil { + return err + } + } + if validating { + if err := validatingAdmission.Validate(ctx, finalAttributes, objInterfaces); err != nil { + return err + } + } + return nil + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/doc.go b/vendor/k8s.io/apiserver/pkg/registry/rest/doc.go new file mode 100644 index 0000000000..20524d21ff --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package rest defines common logic around changes to Kubernetes-style resources. +package rest // import "k8s.io/apiserver/pkg/registry/rest" diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/export.go b/vendor/k8s.io/apiserver/pkg/registry/rest/export.go new file mode 100644 index 0000000000..b3fd8af300 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/export.go @@ -0,0 +1,34 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +// RESTExportStrategy is the interface that defines how to export a Kubernetes +// object. An exported object is stripped of non-user-settable fields and +// optionally, the identifying information related to the object's identity in +// the cluster so that it can be loaded into a different namespace or entirely +// different cluster without conflict. +type RESTExportStrategy interface { + // Export strips fields that can not be set by the user. If 'exact' is false + // fields specific to the cluster are also stripped + Export(ctx context.Context, obj runtime.Object, exact bool) error +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/meta.go b/vendor/k8s.io/apiserver/pkg/registry/rest/meta.go new file mode 100644 index 0000000000..add6044ab0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/meta.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta. +func FillObjectMetaSystemFields(meta metav1.Object) { + meta.SetCreationTimestamp(metav1.Now()) + meta.SetUID(uuid.NewUUID()) + meta.SetSelfLink("") +} + +// ValidNamespace returns false if the namespace on the context differs from +// the resource. If the resource has no namespace, it is set to the value in +// the context. 
+func ValidNamespace(ctx context.Context, resource metav1.Object) bool { + ns, ok := genericapirequest.NamespaceFrom(ctx) + if len(resource.GetNamespace()) == 0 { + resource.SetNamespace(ns) + } + return ns == resource.GetNamespace() && ok +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go new file mode 100644 index 0000000000..8f9c981dd8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go @@ -0,0 +1,351 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + "io" + "net/http" + "net/url" + + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +//TODO: +// Storage interfaces need to be separated into two groups; those that operate +// on collections and those that operate on individually named items. +// Collection interfaces: +// (Method: Current -> Proposed) +// GET: Lister -> CollectionGetter +// WATCH: Watcher -> CollectionWatcher +// CREATE: Creater -> CollectionCreater +// DELETE: (n/a) -> CollectionDeleter +// UPDATE: (n/a) -> CollectionUpdater +// +// Single item interfaces: +// (Method: Current -> Proposed) +// GET: Getter -> NamedGetter +// WATCH: (n/a) -> NamedWatcher +// CREATE: (n/a) -> NamedCreater +// DELETE: Deleter -> NamedDeleter +// UPDATE: Update -> NamedUpdater + +// Storage is a generic interface for RESTful storage services. +// Resources which are exported to the RESTful API of apiserver need to implement this interface. It is expected +// that objects may implement any of the below interfaces. +type Storage interface { + // New returns an empty object that can be used with Create and Update after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object +} + +// Scoper indicates what scope the resource is at. It must be specified. +// It is usually provided automatically based on your strategy. +type Scoper interface { + // NamespaceScoped returns true if the storage is namespaced + NamespaceScoped() bool +} + +// KindProvider specifies a different kind for its API than for its internal storage. This is necessary for external +// objects that are not compiled into the api server. For such objects, there is no in-memory representation for +// the object, so they must be represented as generic objects (e.g. runtime.Unknown), but when we present the object as part of +// API discovery we want to present the specific kind, not the generic internal representation. +type KindProvider interface { + Kind() string +} + +// ShortNamesProvider is an interface for RESTful storage services. Delivers a list of short names for a resource. The list is used by kubectl to have short names representation of resources. 
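Editorial aside: a short, hypothetical example of a REST storage type opting into the discovery-facing provider interfaces described here. The Widget type, the "wg" short name, and the "all" category are assumptions for illustration only.

package example

import (
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
)

// WidgetStorage embeds the generic registry Store; adding the two small
// methods below is how a resource advertises short names and categories.
type WidgetStorage struct {
	*genericregistry.Store
}

// ShortNames implements ShortNamesProvider.
func (WidgetStorage) ShortNames() []string { return []string{"wg"} }

// Categories implements CategoriesProvider.
func (WidgetStorage) Categories() []string { return []string{"all"} }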
+type ShortNamesProvider interface { + ShortNames() []string +} + +// CategoriesProvider allows a resource to specify which groups of resources (categories) it's part of. Categories can +// be used by API clients to refer to a batch of resources by using a single name (e.g. "all" could translate to "pod,rc,svc,..."). +type CategoriesProvider interface { + Categories() []string +} + +// GroupVersionKindProvider is used to specify a particular GroupVersionKind to discovery. This is used for polymorphic endpoints +// which generally point to foreign versions. Scale refers to Scale.v1beta1.extensions for instance. +// This trumps KindProvider since it is capable of providing the information required. +// TODO KindProvider (only used by federation) should be removed and replaced with this, but that presents greater risk late in 1.8. +type GroupVersionKindProvider interface { + GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind +} + +// Lister is an object that can retrieve resources that match the provided field and label criteria. +type Lister interface { + // NewList returns an empty object that can be used with the List call. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + NewList() runtime.Object + // List selects resources in the storage which match to the selector. 'options' can be nil. + List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) + // TableConvertor ensures all list implementers also implement table conversion + TableConvertor +} + +// Exporter is an object that knows how to strip a RESTful resource for export. A store should implement this interface +// if export is generally supported for that type. Errors can still be returned during the actual Export when certain +// instances of the type are not exportable. +type Exporter interface { + // Export an object. Fields that are not user specified (e.g. Status, ObjectMeta.ResourceVersion) are stripped out + // Returns the stripped object. If 'exact' is true, fields that are specific to the cluster (e.g. namespace) are + // retained, otherwise they are stripped also. + Export(ctx context.Context, name string, opts metav1.ExportOptions) (runtime.Object, error) +} + +// Getter is an object that can retrieve a named RESTful resource. +type Getter interface { + // Get finds a resource in the storage by name and returns it. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) +} + +// GetterWithOptions is an object that retrieve a named RESTful resource and takes +// additional options on the get request. It allows a caller to also receive the +// subpath of the GET request. +type GetterWithOptions interface { + // Get finds a resource in the storage by name and returns it. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + // The options object passed to it is of the same type returned by the NewGetOptions + // method. + // TODO: Pass metav1.GetOptions. + Get(ctx context.Context, name string, options runtime.Object) (runtime.Object, error) + + // NewGetOptions returns an empty options object that will be used to pass + // options to the Get method. 
It may return a bool and a string, if true, the + // value of the request path below the object will be included as the named + // string in the serialization of the runtime object. E.g., returning "path" + // will convert the trailing request scheme value to "path" in the map[string][]string + // passed to the converter. + NewGetOptions() (runtime.Object, bool, string) +} + +type TableConvertor interface { + ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) +} + +// GracefulDeleter knows how to pass deletion options to allow delayed deletion of a +// RESTful object. +type GracefulDeleter interface { + // Delete finds a resource in the storage and deletes it. + // The delete attempt is validated by the deleteValidation first. + // If options are provided, the resource will attempt to honor them or return an invalid + // request error. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + // Delete *may* return the object that was deleted, or a status object indicating additional + // information about deletion. + // It also returns a boolean which is set to true if the resource was instantly + // deleted or false if it will be deleted asynchronously. + Delete(ctx context.Context, name string, deleteValidation ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) +} + +// MayReturnFullObjectDeleter may return deleted object (instead of a simple status) on deletion. +type MayReturnFullObjectDeleter interface { + DeleteReturnsDeletedObject() bool +} + +// CollectionDeleter is an object that can delete a collection +// of RESTful resources. +type CollectionDeleter interface { + // DeleteCollection selects all resources in the storage matching given 'listOptions' + // and deletes them. The delete attempt is validated by the deleteValidation first. + // If 'options' are provided, the resource will attempt to honor them or return an + // invalid request error. + // DeleteCollection may not be atomic - i.e. it may delete some objects and still + // return an error after it. On success, returns a list of deleted objects. + DeleteCollection(ctx context.Context, deleteValidation ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) +} + +// Creater is an object that can create an instance of a RESTful object. +type Creater interface { + // New returns an empty object that can be used with Create after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Create creates a new version of a resource. + Create(ctx context.Context, obj runtime.Object, createValidation ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) +} + +// NamedCreater is an object that can create an instance of a RESTful object using a name parameter. +type NamedCreater interface { + // New returns an empty object that can be used with Create after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Create creates a new version of a resource. It expects a name parameter from the path. + // This is needed for create operations on subresources which include the name of the parent + // resource in the path. 
+ Create(ctx context.Context, name string, obj runtime.Object, createValidation ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) +} + +// UpdatedObjectInfo provides information about an updated object to an Updater. +// It requires access to the old object in order to return the newly updated object. +type UpdatedObjectInfo interface { + // Returns preconditions built from the updated object, if applicable. + // May return nil, or a preconditions object containing nil fields, + // if no preconditions can be determined from the updated object. + Preconditions() *metav1.Preconditions + + // UpdatedObject returns the updated object, given a context and old object. + // The only time an empty oldObj should be passed in is if a "create on update" is occurring (there is no oldObj). + UpdatedObject(ctx context.Context, oldObj runtime.Object) (newObj runtime.Object, err error) +} + +// ValidateObjectFunc is a function to act on a given object. An error may be returned +// if the hook cannot be completed. An ObjectFunc may NOT transform the provided +// object. +type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error + +// ValidateAllObjectFunc is a "admit everything" instance of ValidateObjectFunc. +func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error { + return nil +} + +// ValidateObjectUpdateFunc is a function to act on a given object and its predecessor. +// An error may be returned if the hook cannot be completed. An UpdateObjectFunc +// may NOT transform the provided object. +type ValidateObjectUpdateFunc func(ctx context.Context, obj, old runtime.Object) error + +// ValidateAllObjectUpdateFunc is a "admit everything" instance of ValidateObjectUpdateFunc. +func ValidateAllObjectUpdateFunc(ctx context.Context, obj, old runtime.Object) error { + return nil +} + +// Updater is an object that can update an instance of a RESTful object. +type Updater interface { + // New returns an empty object that can be used with Update after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Update finds a resource in the storage and updates it. Some implementations + // may allow updates creates the object - they should set the created boolean + // to true. + Update(ctx context.Context, name string, objInfo UpdatedObjectInfo, createValidation ValidateObjectFunc, updateValidation ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) +} + +// CreaterUpdater is a storage object that must support both create and update. +// Go prevents embedded interfaces that implement the same method. +type CreaterUpdater interface { + Creater + Update(ctx context.Context, name string, objInfo UpdatedObjectInfo, createValidation ValidateObjectFunc, updateValidation ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) +} + +// CreaterUpdater must satisfy the Updater interface. +var _ Updater = CreaterUpdater(nil) + +// Patcher is a storage object that supports both get and update. +type Patcher interface { + Getter + Updater +} + +// Watcher should be implemented by all Storage objects that +// want to offer the ability to watch for changes through the watch api. +type Watcher interface { + // 'label' selects on labels; 'field' selects on the object's fields. 
Not all fields + // are supported; an error should be returned if 'field' tries to select on a field that + // isn't supported. 'resourceVersion' allows for continuing/starting a watch at a + // particular version. + Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) +} + +// StandardStorage is an interface covering the common verbs. Provided for testing whether a +// resource satisfies the normal storage methods. Use Storage when passing opaque storage objects. +type StandardStorage interface { + Getter + Lister + CreaterUpdater + GracefulDeleter + CollectionDeleter + Watcher +} + +// Redirector know how to return a remote resource's location. +type Redirector interface { + // ResourceLocation should return the remote location of the given resource, and an optional transport to use to request it, or an error. + ResourceLocation(ctx context.Context, id string) (remoteLocation *url.URL, transport http.RoundTripper, err error) +} + +// Responder abstracts the normal response behavior for a REST method and is passed to callers that +// may wish to handle the response directly in some cases, but delegate to the normal error or object +// behavior in other cases. +type Responder interface { + // Object writes the provided object to the response. Invoking this method multiple times is undefined. + Object(statusCode int, obj runtime.Object) + // Error writes the provided error to the response. This method may only be invoked once. + Error(err error) +} + +// Connecter is a storage object that responds to a connection request. +type Connecter interface { + // Connect returns an http.Handler that will handle the request/response for a given API invocation. + // The provided responder may be used for common API responses. The responder will write both status + // code and body, so the ServeHTTP method should exit after invoking the responder. The Handler will + // be used for a single API request and then discarded. The Responder is guaranteed to write to the + // same http.ResponseWriter passed to ServeHTTP. + Connect(ctx context.Context, id string, options runtime.Object, r Responder) (http.Handler, error) + + // NewConnectOptions returns an empty options object that will be used to pass + // options to the Connect method. If nil, then a nil options object is passed to + // Connect. It may return a bool and a string. If true, the value of the request + // path below the object will be included as the named string in the serialization + // of the runtime object. + NewConnectOptions() (runtime.Object, bool, string) + + // ConnectMethods returns the list of HTTP methods handled by Connect + ConnectMethods() []string +} + +// ResourceStreamer is an interface implemented by objects that prefer to be streamed from the server +// instead of decoded directly. +type ResourceStreamer interface { + // InputStream should return an io.ReadCloser if the provided object supports streaming. The desired + // api version and an accept header (may be empty) are passed to the call. If no error occurs, + // the caller may return a flag indicating whether the result should be flushed as writes occur + // and a content type string that indicates the type of the stream. + // If a null stream is returned, a StatusNoContent response wil be generated. 
+ InputStream(ctx context.Context, apiVersion, acceptHeader string) (stream io.ReadCloser, flush bool, mimeType string, err error) +} + +// StorageMetadata is an optional interface that callers can implement to provide additional +// information about their Storage objects. +type StorageMetadata interface { + // ProducesMIMETypes returns a list of the MIME types the specified HTTP verb (GET, POST, DELETE, + // PATCH) can respond with. + ProducesMIMETypes(verb string) []string + + // ProducesObject returns an object the specified HTTP verb respond with. It will overwrite storage object if + // it is not nil. Only the type of the return object matters, the value will be ignored. + ProducesObject(verb string) interface{} +} + +// StorageVersionProvider is an optional interface that a storage object can +// implement if it wishes to disclose its storage version. +type StorageVersionProvider interface { + // StorageVersion returns a group versioner, which will outputs the gvk + // an object will be converted to before persisted in etcd, given a + // list of kinds the object might belong to. + StorageVersion() runtime.GroupVersioner +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/table.go b/vendor/k8s.io/apiserver/pkg/registry/rest/table.go new file mode 100644 index 0000000000..d90ae70762 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/table.go @@ -0,0 +1,107 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +type defaultTableConvertor struct { + defaultQualifiedResource schema.GroupResource +} + +// NewDefaultTableConvertor creates a default convertor; the provided resource is used for error messages +// if no resource info can be determined from the context passed to ConvertToTable. 
+func NewDefaultTableConvertor(defaultQualifiedResource schema.GroupResource) TableConvertor { + return defaultTableConvertor{defaultQualifiedResource: defaultQualifiedResource} +} + +var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() + +func (c defaultTableConvertor) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { + var table metav1.Table + fn := func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + resource := c.defaultQualifiedResource + if info, ok := genericapirequest.RequestInfoFrom(ctx); ok { + resource = schema.GroupResource{Group: info.APIGroup, Resource: info.Resource} + } + return errNotAcceptable{resource: resource} + } + table.Rows = append(table.Rows, metav1.TableRow{ + Cells: []interface{}{m.GetName(), m.GetCreationTimestamp().Time.UTC().Format(time.RFC3339)}, + Object: runtime.RawExtension{Object: obj}, + }) + return nil + } + switch { + case meta.IsListType(object): + if err := meta.EachListItem(object, fn); err != nil { + return nil, err + } + default: + if err := fn(object); err != nil { + return nil, err + } + } + if m, err := meta.ListAccessor(object); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + table.Continue = m.GetContinue() + table.RemainingItemCount = m.GetRemainingItemCount() + } else { + if m, err := meta.CommonAccessor(object); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + } + } + if opt, ok := tableOptions.(*metav1.TableOptions); !ok || !opt.NoHeaders { + table.ColumnDefinitions = []metav1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: swaggerMetadataDescriptions["name"]}, + {Name: "Created At", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"]}, + } + } + return &table, nil +} + +// errNotAcceptable indicates the resource doesn't support Table conversion +type errNotAcceptable struct { + resource schema.GroupResource +} + +func (e errNotAcceptable) Error() string { + return fmt.Sprintf("the resource %s does not support being converted to a Table", e.resource) +} + +func (e errNotAcceptable) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReason("NotAcceptable"), + Message: e.Error(), + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/update.go b/vendor/k8s.io/apiserver/pkg/registry/rest/update.go new file mode 100644 index 0000000000..0741b84ec2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/update.go @@ -0,0 +1,279 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rest + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + genericvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/api/validation/path" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +// RESTUpdateStrategy defines the minimum validation, accepted input, and +// name generation behavior to update an object that follows Kubernetes +// API conventions. A resource may have many UpdateStrategies, depending on +// the call pattern in use. +type RESTUpdateStrategy interface { + runtime.ObjectTyper + // NamespaceScoped returns true if the object must be within a namespace. + NamespaceScoped() bool + // AllowCreateOnUpdate returns true if the object can be created by a PUT. + AllowCreateOnUpdate() bool + // PrepareForUpdate is invoked on update before validation to normalize + // the object. For example: remove fields that are not to be persisted, + // sort order-insensitive list fields, etc. This should not remove fields + // whose presence would be considered a validation error. + PrepareForUpdate(ctx context.Context, obj, old runtime.Object) + // ValidateUpdate is invoked after default fields in the object have been + // filled in before the object is persisted. This method should not mutate + // the object. + ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList + // Canonicalize allows an object to be mutated into a canonical form. This + // ensures that code that operates on these objects can rely on the common + // form for things like comparison. Canonicalize is invoked after + // validation has succeeded but before the object has been persisted. + // This method may mutate the object. + Canonicalize(obj runtime.Object) + // AllowUnconditionalUpdate returns true if the object can be updated + // unconditionally (irrespective of the latest resource version), when + // there is no resource version specified in the object. + AllowUnconditionalUpdate() bool +} + +// TODO: add other common fields that require global validation. +func validateCommonFields(obj, old runtime.Object, strategy RESTUpdateStrategy) (field.ErrorList, error) { + allErrs := field.ErrorList{} + objectMeta, err := meta.Accessor(obj) + if err != nil { + return nil, fmt.Errorf("failed to get new object metadata: %v", err) + } + oldObjectMeta, err := meta.Accessor(old) + if err != nil { + return nil, fmt.Errorf("failed to get old object metadata: %v", err) + } + allErrs = append(allErrs, genericvalidation.ValidateObjectMetaAccessor(objectMeta, strategy.NamespaceScoped(), path.ValidatePathSegmentName, field.NewPath("metadata"))...) + allErrs = append(allErrs, genericvalidation.ValidateObjectMetaAccessorUpdate(objectMeta, oldObjectMeta, field.NewPath("metadata"))...) + + return allErrs, nil +} + +// BeforeUpdate ensures that common operations for all resources are performed on update. It only returns +// errors that can be converted to api.Status. It will invoke update validation with the provided existing +// and updated objects. +// It sets zero values only if the object does not have a zero value for the respective field. 
+func BeforeUpdate(strategy RESTUpdateStrategy, ctx context.Context, obj, old runtime.Object) error { + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + if strategy.NamespaceScoped() { + if !ValidNamespace(ctx, objectMeta) { + return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") + } + } else if len(objectMeta.GetNamespace()) > 0 { + objectMeta.SetNamespace(metav1.NamespaceNone) + } + + // Ensure requests cannot update generation + oldMeta, err := meta.Accessor(old) + if err != nil { + return err + } + objectMeta.SetGeneration(oldMeta.GetGeneration()) + + // Ensure managedFields state is removed unless ServerSideApply is enabled + if !utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + oldMeta.SetManagedFields(nil) + objectMeta.SetManagedFields(nil) + } + + strategy.PrepareForUpdate(ctx, obj, old) + + // ClusterName is ignored and should not be saved + if len(objectMeta.GetClusterName()) > 0 { + objectMeta.SetClusterName("") + } + // Use the existing UID if none is provided + if len(objectMeta.GetUID()) == 0 { + objectMeta.SetUID(oldMeta.GetUID()) + } + // ignore changes to timestamp + if oldCreationTime := oldMeta.GetCreationTimestamp(); !oldCreationTime.IsZero() { + objectMeta.SetCreationTimestamp(oldMeta.GetCreationTimestamp()) + } + // an update can never remove/change a deletion timestamp + if !oldMeta.GetDeletionTimestamp().IsZero() { + objectMeta.SetDeletionTimestamp(oldMeta.GetDeletionTimestamp()) + } + // an update can never remove/change grace period seconds + if oldMeta.GetDeletionGracePeriodSeconds() != nil && objectMeta.GetDeletionGracePeriodSeconds() == nil { + objectMeta.SetDeletionGracePeriodSeconds(oldMeta.GetDeletionGracePeriodSeconds()) + } + + // Ensure some common fields, like UID, are validated for all resources. + errs, err := validateCommonFields(obj, old, strategy) + if err != nil { + return errors.NewInternalError(err) + } + + errs = append(errs, strategy.ValidateUpdate(ctx, obj, old)...) + if len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.GetName(), errs) + } + + strategy.Canonicalize(obj) + + return nil +} + +// TransformFunc is a function to transform and return newObj +type TransformFunc func(ctx context.Context, newObj runtime.Object, oldObj runtime.Object) (transformedNewObj runtime.Object, err error) + +// defaultUpdatedObjectInfo implements UpdatedObjectInfo +type defaultUpdatedObjectInfo struct { + // obj is the updated object + obj runtime.Object + + // transformers is an optional list of transforming functions that modify or + // replace obj using information from the context, old object, or other sources. + transformers []TransformFunc +} + +// DefaultUpdatedObjectInfo returns an UpdatedObjectInfo impl based on the specified object. +func DefaultUpdatedObjectInfo(obj runtime.Object, transformers ...TransformFunc) UpdatedObjectInfo { + return &defaultUpdatedObjectInfo{obj, transformers} +} + +// Preconditions satisfies the UpdatedObjectInfo interface. 
+func (i *defaultUpdatedObjectInfo) Preconditions() *metav1.Preconditions { + // Attempt to get the UID out of the object + accessor, err := meta.Accessor(i.obj) + if err != nil { + // If no UID can be read, no preconditions are possible + return nil + } + + // If empty, no preconditions needed + uid := accessor.GetUID() + if len(uid) == 0 { + return nil + } + + return &metav1.Preconditions{UID: &uid} +} + +// UpdatedObject satisfies the UpdatedObjectInfo interface. +// It returns a copy of the held obj, passed through any configured transformers. +func (i *defaultUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runtime.Object) (runtime.Object, error) { + var err error + // Start with the configured object + newObj := i.obj + + // If the original is non-nil (might be nil if the first transformer builds the object from the oldObj), make a copy, + // so we don't return the original. BeforeUpdate can mutate the returned object, doing things like clearing ResourceVersion. + // If we're re-called, we need to be able to return the pristine version. + if newObj != nil { + newObj = newObj.DeepCopyObject() + } + + // Allow any configured transformers to update the new object + for _, transformer := range i.transformers { + newObj, err = transformer(ctx, newObj, oldObj) + if err != nil { + return nil, err + } + } + + return newObj, nil +} + +// wrappedUpdatedObjectInfo allows wrapping an existing objInfo and +// chaining additional transformations/checks on the result of UpdatedObject() +type wrappedUpdatedObjectInfo struct { + // obj is the updated object + objInfo UpdatedObjectInfo + + // transformers is an optional list of transforming functions that modify or + // replace obj using information from the context, old object, or other sources. + transformers []TransformFunc +} + +// WrapUpdatedObjectInfo returns an UpdatedObjectInfo impl that delegates to +// the specified objInfo, then calls the passed transformers +func WrapUpdatedObjectInfo(objInfo UpdatedObjectInfo, transformers ...TransformFunc) UpdatedObjectInfo { + return &wrappedUpdatedObjectInfo{objInfo, transformers} +} + +// Preconditions satisfies the UpdatedObjectInfo interface. +func (i *wrappedUpdatedObjectInfo) Preconditions() *metav1.Preconditions { + return i.objInfo.Preconditions() +} + +// UpdatedObject satisfies the UpdatedObjectInfo interface. +// It delegates to the wrapped objInfo and passes the result through any configured transformers. 
+func (i *wrappedUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runtime.Object) (runtime.Object, error) { + newObj, err := i.objInfo.UpdatedObject(ctx, oldObj) + if err != nil { + return newObj, err + } + + // Allow any configured transformers to update the new object or error + for _, transformer := range i.transformers { + newObj, err = transformer(ctx, newObj, oldObj) + if err != nil { + return nil, err + } + } + + return newObj, nil +} + +// AdmissionToValidateObjectUpdateFunc converts validating admission to a rest validate object update func +func AdmissionToValidateObjectUpdateFunc(admit admission.Interface, staticAttributes admission.Attributes, o admission.ObjectInterfaces) ValidateObjectUpdateFunc { + validatingAdmission, ok := admit.(admission.ValidationInterface) + if !ok { + return func(ctx context.Context, obj, old runtime.Object) error { return nil } + } + return func(ctx context.Context, obj, old runtime.Object) error { + finalAttributes := admission.NewAttributesRecord( + obj, + old, + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + staticAttributes.GetName(), + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), + staticAttributes.IsDryRun(), + staticAttributes.GetUserInfo(), + ) + if !validatingAdmission.Handles(finalAttributes.GetOperation()) { + return nil + } + return validatingAdmission.Validate(ctx, finalAttributes, o) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go new file mode 100644 index 0000000000..9ac8579240 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -0,0 +1,854 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "fmt" + "net" + "net/http" + goruntime "runtime" + "runtime/debug" + "sort" + "strconv" + "strings" + "sync/atomic" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/go-openapi/spec" + "github.com/google/uuid" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + auditpolicy "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + authorizerunion "k8s.io/apiserver/pkg/authorization/union" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/filterlatency" + genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" + apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + genericregistry "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/apiserver/pkg/server/egressselector" + genericfilters "k8s.io/apiserver/pkg/server/filters" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/server/routes" + serverstore "k8s.io/apiserver/pkg/server/storage" + "k8s.io/apiserver/pkg/storageversion" + "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + "k8s.io/client-go/informers" + restclient "k8s.io/client-go/rest" + "k8s.io/component-base/logs" + "k8s.io/klog/v2" + openapicommon "k8s.io/kube-openapi/pkg/common" + utilsnet "k8s.io/utils/net" + + // install apis + _ "k8s.io/apiserver/pkg/apis/apiserver/install" +) + +const ( + // DefaultLegacyAPIPrefix is where the legacy APIs will be located. + DefaultLegacyAPIPrefix = "/api" + + // APIGroupPrefix is where non-legacy API group will be located. + APIGroupPrefix = "/apis" +) + +// Config is a structure used to configure a GenericAPIServer. +// Its members are sorted roughly in order of importance for composers. +type Config struct { + // SecureServing is required to serve https + SecureServing *SecureServingInfo + + // Authentication is the configuration for authentication + Authentication AuthenticationInfo + + // Authorization is the configuration for authorization + Authorization AuthorizationInfo + + // LoopbackClientConfig is a config for a privileged loopback connection to the API server + // This is required for proper functioning of the PostStartHooks on a GenericAPIServer + // TODO: move into SecureServing(WithLoopback) as soon as insecure serving is gone + LoopbackClientConfig *restclient.Config + + // EgressSelector provides a lookup mechanism for dialing outbound connections. + // It does so based on a EgressSelectorConfiguration which was read at startup. 
+ EgressSelector *egressselector.EgressSelector + + // RuleResolver is required to get the list of rules that apply to a given user + // in a given namespace + RuleResolver authorizer.RuleResolver + // AdmissionControl performs deep inspection of a given request (including content) + // to set values and determine whether its allowed + AdmissionControl admission.Interface + CorsAllowedOriginList []string + + // FlowControl, if not nil, gives priority and fairness to request handling + FlowControl utilflowcontrol.Interface + + EnableIndex bool + EnableProfiling bool + EnableDiscovery bool + // Requires generic profiling enabled + EnableContentionProfiling bool + EnableMetrics bool + + DisabledPostStartHooks sets.String + // done values in this values for this map are ignored. + PostStartHooks map[string]PostStartHookConfigEntry + + // Version will enable the /version endpoint if non-nil + Version *version.Info + // AuditBackend is where audit events are sent to. + AuditBackend audit.Backend + // AuditPolicyChecker makes the decision of whether and how to audit log a request. + AuditPolicyChecker auditpolicy.Checker + // ExternalAddress is the host name to use for external (public internet) facing URLs (e.g. Swagger) + // Will default to a value based on secure serving info and available ipv4 IPs. + ExternalAddress string + + //=========================================================================== + // Fields you probably don't care about changing + //=========================================================================== + + // BuildHandlerChainFunc allows you to build custom handler chains by decorating the apiHandler. + BuildHandlerChainFunc func(apiHandler http.Handler, c *Config) (secure http.Handler) + // HandlerChainWaitGroup allows you to wait for all chain handlers exit after the server shutdown. + HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup + // DiscoveryAddresses is used to build the IPs pass to discovery. If nil, the ExternalAddress is + // always reported + DiscoveryAddresses discovery.Addresses + // The default set of healthz checks. There might be more added via AddHealthChecks dynamically. + HealthzChecks []healthz.HealthChecker + // The default set of livez checks. There might be more added via AddHealthChecks dynamically. + LivezChecks []healthz.HealthChecker + // The default set of readyz-only checks. There might be more added via AddReadyzChecks dynamically. + ReadyzChecks []healthz.HealthChecker + // LegacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests + // to InstallLegacyAPIGroup. New API servers don't generally have legacy groups at all. + LegacyAPIGroupPrefixes sets.String + // RequestInfoResolver is used to assign attributes (used by admission and authorization) based on a request URL. + // Use-cases that are like kubelets may need to customize this. + RequestInfoResolver apirequest.RequestInfoResolver + // Serializer is required and provides the interface for serializing and converting objects to and from the wire + // The default (api.Codecs) usually works fine. + Serializer runtime.NegotiatedSerializer + // OpenAPIConfig will be used in generating OpenAPI spec. This is nil by default. Use DefaultOpenAPIConfig for "working" defaults. + OpenAPIConfig *openapicommon.Config + + // RESTOptionsGetter is used to construct RESTStorage types via the generic registry. 
+ RESTOptionsGetter genericregistry.RESTOptionsGetter + + // If specified, all requests except those which match the LongRunningFunc predicate will timeout + // after this duration. + RequestTimeout time.Duration + // If specified, long running requests such as watch will be allocated a random timeout between this value, and + // twice this value. Note that it is up to the request handlers to ignore or honor this timeout. In seconds. + MinRequestTimeout int + + // This represents the maximum amount of time it should take for apiserver to complete its startup + // sequence and become healthy. From apiserver's start time to when this amount of time has + // elapsed, /livez will assume that unfinished post-start hooks will complete successfully and + // therefore return true. + LivezGracePeriod time.Duration + // ShutdownDelayDuration allows to block shutdown for some time, e.g. until endpoints pointing to this API server + // have converged on all node. During this time, the API server keeps serving, /healthz will return 200, + // but /readyz will return failure. + ShutdownDelayDuration time.Duration + + // The limit on the total size increase all "copy" operations in a json + // patch may cause. + // This affects all places that applies json patch in the binary. + JSONPatchMaxCopyBytes int64 + // The limit on the request size that would be accepted and decoded in a write request + // 0 means no limit. + MaxRequestBodyBytes int64 + // MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further + // request has to wait. Applies only to non-mutating requests. + MaxRequestsInFlight int + // MaxMutatingRequestsInFlight is the maximum number of parallel mutating requests. Every further + // request has to wait. + MaxMutatingRequestsInFlight int + // Predicate which is true for paths of long-running http requests + LongRunningFunc apirequest.LongRunningRequestCheck + + // GoawayChance is the probability that send a GOAWAY to HTTP/2 clients. When client received + // GOAWAY, the in-flight requests will not be affected and new requests will use + // a new TCP connection to triggering re-balancing to another server behind the load balance. + // Default to 0, means never send GOAWAY. Max is 0.02 to prevent break the apiserver. + GoawayChance float64 + + // MergedResourceConfig indicates which groupVersion enabled and its resources enabled/disabled. + // This is composed of genericapiserver defaultAPIResourceConfig and those parsed from flags. + // If not specify any in flags, then genericapiserver will only enable defaultAPIResourceConfig. + MergedResourceConfig *serverstore.ResourceConfig + + //=========================================================================== + // values below here are targets for removal + //=========================================================================== + + // PublicAddress is the IP address where members of the cluster (kubelet, + // kube-proxy, services, etc.) can reach the GenericAPIServer. + // If nil or 0.0.0.0, the host's default interface will be used. + PublicAddress net.IP + + // EquivalentResourceRegistry provides information about resources equivalent to a given resource, + // and the kind associated with a given resource. As resources are installed, they are registered here. + EquivalentResourceRegistry runtime.EquivalentResourceRegistry + + // APIServerID is the ID of this API server + APIServerID string + + // StorageVersionManager holds the storage versions of the API resources installed by this server. 
+ StorageVersionManager storageversion.Manager +} + +type RecommendedConfig struct { + Config + + // SharedInformerFactory provides shared informers for Kubernetes resources. This value is set by + // RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config + // by default, or the kubeconfig given with kubeconfig command line flag. + SharedInformerFactory informers.SharedInformerFactory + + // ClientConfig holds the kubernetes client configuration. + // This value is set by RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. + // By default in-cluster client config is used. + ClientConfig *restclient.Config +} + +type SecureServingInfo struct { + // Listener is the secure server network listener. + Listener net.Listener + + // Cert is the main server cert which is used if SNI does not match. Cert must be non-nil and is + // allowed to be in SNICerts. + Cert dynamiccertificates.CertKeyContentProvider + + // SNICerts are the TLS certificates used for SNI. + SNICerts []dynamiccertificates.SNICertKeyContentProvider + + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + ClientCA dynamiccertificates.CAContentProvider + + // MinTLSVersion optionally overrides the minimum TLS version supported. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + MinTLSVersion uint16 + + // CipherSuites optionally overrides the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + CipherSuites []uint16 + + // HTTP2MaxStreamsPerConnection is the limit that the api server imposes on each client. + // A value of zero means to use the default provided by golang's HTTP/2 support. + HTTP2MaxStreamsPerConnection int + + // DisableHTTP2 indicates that http2 should not be enabled. + DisableHTTP2 bool +} + +type AuthenticationInfo struct { + // APIAudiences is a list of identifier that the API identifies as. This is + // used by some authenticators to validate audience bound credentials. 
+ APIAudiences authenticator.Audiences + // Authenticator determines which subject is making the request + Authenticator authenticator.Request +} + +type AuthorizationInfo struct { + // Authorizer determines whether the subject is allowed to make the request based only + // on the RequestURI + Authorizer authorizer.Authorizer +} + +// NewConfig returns a Config struct with the default values +func NewConfig(codecs serializer.CodecFactory) *Config { + defaultHealthChecks := []healthz.HealthChecker{healthz.PingHealthz, healthz.LogHealthz} + var id string + if feature.DefaultFeatureGate.Enabled(features.APIServerIdentity) { + id = "kube-apiserver-" + uuid.New().String() + } + return &Config{ + Serializer: codecs, + BuildHandlerChainFunc: DefaultBuildHandlerChain, + HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup), + LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), + DisabledPostStartHooks: sets.NewString(), + PostStartHooks: map[string]PostStartHookConfigEntry{}, + HealthzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + ReadyzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + LivezChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + EnableIndex: true, + EnableDiscovery: true, + EnableProfiling: true, + EnableMetrics: true, + MaxRequestsInFlight: 400, + MaxMutatingRequestsInFlight: 200, + RequestTimeout: time.Duration(60) * time.Second, + MinRequestTimeout: 1800, + LivezGracePeriod: time.Duration(0), + ShutdownDelayDuration: time.Duration(0), + // 1.5MB is the default client request size in bytes + // the etcd server should accept. See + // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. + // A request body might be encoded in json, and is converted to + // proto when persisted in etcd, so we allow 2x as the largest size + // increase the "copy" operations in a json patch may cause. + JSONPatchMaxCopyBytes: int64(3 * 1024 * 1024), + // 1.5MB is the recommended client request size in byte + // the etcd server should accept. See + // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. + // A request body might be encoded in json, and is converted to + // proto when persisted in etcd, so we allow 2x as the largest request + // body size to be accepted and decoded in a write request. 
+ MaxRequestBodyBytes: int64(3 * 1024 * 1024), + + // Default to treating watch as a long-running operation + // Generic API servers have no inherent long-running subresources + LongRunningFunc: genericfilters.BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString()), + APIServerID: id, + StorageVersionManager: storageversion.NewDefaultManager(), + } +} + +// NewRecommendedConfig returns a RecommendedConfig struct with the default values +func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig { + return &RecommendedConfig{ + Config: *NewConfig(codecs), + } +} + +func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.Config { + return &openapicommon.Config{ + ProtocolList: []string{"https"}, + IgnorePrefixes: []string{}, + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Title: "Generic API Server", + }, + }, + DefaultResponse: &spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: "Default Response.", + }, + }, + GetOperationIDAndTags: apiopenapi.GetOperationIDAndTags, + GetDefinitionName: defNamer.GetDefinitionName, + GetDefinitions: getDefinitions, + } +} + +func (c *AuthenticationInfo) ApplyClientCert(clientCA dynamiccertificates.CAContentProvider, servingInfo *SecureServingInfo) error { + if servingInfo == nil { + return nil + } + if clientCA == nil { + return nil + } + if servingInfo.ClientCA == nil { + servingInfo.ClientCA = clientCA + return nil + } + + servingInfo.ClientCA = dynamiccertificates.NewUnionCAContentProvider(servingInfo.ClientCA, clientCA) + return nil +} + +type completedConfig struct { + *Config + + //=========================================================================== + // values below here are filled in during completion + //=========================================================================== + + // SharedInformerFactory provides shared informers for resources + SharedInformerFactory informers.SharedInformerFactory +} + +type CompletedConfig struct { + // Embed a private pointer that cannot be instantiated outside of this package. + *completedConfig +} + +// AddHealthChecks adds a health check to our config to be exposed by the health endpoints +// of our configured apiserver. We should prefer this to adding healthChecks directly to +// the config unless we explicitly want to add a healthcheck only to a specific health endpoint. +func (c *Config) AddHealthChecks(healthChecks ...healthz.HealthChecker) { + for _, check := range healthChecks { + c.HealthzChecks = append(c.HealthzChecks, check) + c.LivezChecks = append(c.LivezChecks, check) + c.ReadyzChecks = append(c.ReadyzChecks, check) + } +} + +// AddPostStartHook allows you to add a PostStartHook that will later be added to the server itself in a New call. +// Name conflicts will cause an error. 
+func (c *Config) AddPostStartHook(name string, hook PostStartHookFunc) error { + if len(name) == 0 { + return fmt.Errorf("missing name") + } + if hook == nil { + return fmt.Errorf("hook func may not be nil: %q", name) + } + if c.DisabledPostStartHooks.Has(name) { + klog.V(1).Infof("skipping %q because it was explicitly disabled", name) + return nil + } + + if postStartHook, exists := c.PostStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + return fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.originatingStack) + } + c.PostStartHooks[name] = PostStartHookConfigEntry{hook: hook, originatingStack: string(debug.Stack())} + + return nil +} + +// AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure. +func (c *Config) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { + if err := c.AddPostStartHook(name, hook); err != nil { + klog.Fatalf("Error registering PostStartHook %q: %v", name, err) + } +} + +// Complete fills in any fields not set that are required to have valid data and can be derived +// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. +func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { + if len(c.ExternalAddress) == 0 && c.PublicAddress != nil { + c.ExternalAddress = c.PublicAddress.String() + } + + // if there is no port, and we listen on one securely, use that one + if _, _, err := net.SplitHostPort(c.ExternalAddress); err != nil { + if c.SecureServing == nil { + klog.Fatalf("cannot derive external address port without listening on a secure port.") + } + _, port, err := c.SecureServing.HostPort() + if err != nil { + klog.Fatalf("cannot derive external address from the secure port: %v", err) + } + c.ExternalAddress = net.JoinHostPort(c.ExternalAddress, strconv.Itoa(port)) + } + + if c.OpenAPIConfig != nil { + if c.OpenAPIConfig.SecurityDefinitions != nil { + // Setup OpenAPI security: all APIs will have the same authentication for now. 
+ c.OpenAPIConfig.DefaultSecurity = []map[string][]string{} + keys := []string{} + for k := range *c.OpenAPIConfig.SecurityDefinitions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + c.OpenAPIConfig.DefaultSecurity = append(c.OpenAPIConfig.DefaultSecurity, map[string][]string{k: {}}) + } + if c.OpenAPIConfig.CommonResponses == nil { + c.OpenAPIConfig.CommonResponses = map[int]spec.Response{} + } + if _, exists := c.OpenAPIConfig.CommonResponses[http.StatusUnauthorized]; !exists { + c.OpenAPIConfig.CommonResponses[http.StatusUnauthorized] = spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: "Unauthorized", + }, + } + } + } + + // make sure we populate info, and info.version, if not manually set + if c.OpenAPIConfig.Info == nil { + c.OpenAPIConfig.Info = &spec.Info{} + } + if c.OpenAPIConfig.Info.Version == "" { + if c.Version != nil { + c.OpenAPIConfig.Info.Version = strings.Split(c.Version.String(), "-")[0] + } else { + c.OpenAPIConfig.Info.Version = "unversioned" + } + } + } + if c.DiscoveryAddresses == nil { + c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress} + } + + AuthorizeClientBearerToken(c.LoopbackClientConfig, &c.Authentication, &c.Authorization) + + if c.RequestInfoResolver == nil { + c.RequestInfoResolver = NewRequestInfoResolver(c) + } + + if c.EquivalentResourceRegistry == nil { + if c.RESTOptionsGetter == nil { + c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistry() + } else { + c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistryWithIdentity(func(groupResource schema.GroupResource) string { + // use the storage prefix as the key if possible + if opts, err := c.RESTOptionsGetter.GetRESTOptions(groupResource); err == nil { + return opts.ResourcePrefix + } + // otherwise return "" to use the default key (parent GV name) + return "" + }) + } + } + + return CompletedConfig{&completedConfig{c, informers}} +} + +// Complete fills in any fields not set that are required to have valid data and can be derived +// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. +func (c *RecommendedConfig) Complete() CompletedConfig { + return c.Config.Complete(c.SharedInformerFactory) +} + +// New creates a new server which logically combines the handling chain with the passed server. +// name is used to differentiate for logging. The handler chain in particular can be difficult as it starts delgating. +// delegationTarget may not be nil. 
+func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*GenericAPIServer, error) { + if c.Serializer == nil { + return nil, fmt.Errorf("Genericapiserver.New() called with config.Serializer == nil") + } + if c.LoopbackClientConfig == nil { + return nil, fmt.Errorf("Genericapiserver.New() called with config.LoopbackClientConfig == nil") + } + if c.EquivalentResourceRegistry == nil { + return nil, fmt.Errorf("Genericapiserver.New() called with config.EquivalentResourceRegistry == nil") + } + + handlerChainBuilder := func(handler http.Handler) http.Handler { + return c.BuildHandlerChainFunc(handler, c.Config) + } + apiServerHandler := NewAPIServerHandler(name, c.Serializer, handlerChainBuilder, delegationTarget.UnprotectedHandler()) + + s := &GenericAPIServer{ + discoveryAddresses: c.DiscoveryAddresses, + LoopbackClientConfig: c.LoopbackClientConfig, + legacyAPIGroupPrefixes: c.LegacyAPIGroupPrefixes, + admissionControl: c.AdmissionControl, + Serializer: c.Serializer, + AuditBackend: c.AuditBackend, + Authorizer: c.Authorization.Authorizer, + delegationTarget: delegationTarget, + EquivalentResourceRegistry: c.EquivalentResourceRegistry, + HandlerChainWaitGroup: c.HandlerChainWaitGroup, + + minRequestTimeout: time.Duration(c.MinRequestTimeout) * time.Second, + ShutdownTimeout: c.RequestTimeout, + ShutdownDelayDuration: c.ShutdownDelayDuration, + SecureServingInfo: c.SecureServing, + ExternalAddress: c.ExternalAddress, + + Handler: apiServerHandler, + + listedPathProvider: apiServerHandler, + + openAPIConfig: c.OpenAPIConfig, + + postStartHooks: map[string]postStartHookEntry{}, + preShutdownHooks: map[string]preShutdownHookEntry{}, + disabledPostStartHooks: c.DisabledPostStartHooks, + + healthzChecks: c.HealthzChecks, + livezChecks: c.LivezChecks, + readyzChecks: c.ReadyzChecks, + readinessStopCh: make(chan struct{}), + livezGracePeriod: c.LivezGracePeriod, + + DiscoveryGroupManager: discovery.NewRootAPIsHandler(c.DiscoveryAddresses, c.Serializer), + + maxRequestBodyBytes: c.MaxRequestBodyBytes, + livezClock: clock.RealClock{}, + + APIServerID: c.APIServerID, + StorageVersionManager: c.StorageVersionManager, + } + + for { + if c.JSONPatchMaxCopyBytes <= 0 { + break + } + existing := atomic.LoadInt64(&jsonpatch.AccumulatedCopySizeLimit) + if existing > 0 && existing < c.JSONPatchMaxCopyBytes { + break + } + if atomic.CompareAndSwapInt64(&jsonpatch.AccumulatedCopySizeLimit, existing, c.JSONPatchMaxCopyBytes) { + break + } + } + + // first add poststarthooks from delegated targets + for k, v := range delegationTarget.PostStartHooks() { + s.postStartHooks[k] = v + } + + for k, v := range delegationTarget.PreShutdownHooks() { + s.preShutdownHooks[k] = v + } + + // add poststarthooks that were preconfigured. Using the add method will give us an error if the same name has already been registered. + for name, preconfiguredPostStartHook := range c.PostStartHooks { + if err := s.AddPostStartHook(name, preconfiguredPostStartHook.hook); err != nil { + return nil, err + } + } + + genericApiServerHookName := "generic-apiserver-start-informers" + if c.SharedInformerFactory != nil { + if !s.isPostStartHookRegistered(genericApiServerHookName) { + err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error { + c.SharedInformerFactory.Start(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + } + // TODO: Once we get rid of /healthz consider changing this to post-start-hook. 
+ err := s.addReadyzChecks(healthz.NewInformerSyncHealthz(c.SharedInformerFactory)) + if err != nil { + return nil, err + } + } + + const priorityAndFairnessConfigConsumerHookName = "priority-and-fairness-config-consumer" + if s.isPostStartHookRegistered(priorityAndFairnessConfigConsumerHookName) { + } else if c.FlowControl != nil { + err := s.AddPostStartHook(priorityAndFairnessConfigConsumerHookName, func(context PostStartHookContext) error { + go c.FlowControl.MaintainObservations(context.StopCh) + go c.FlowControl.Run(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + // TODO(yue9944882): plumb pre-shutdown-hook for request-management system? + } else { + klog.V(3).Infof("Not requested to run hook %s", priorityAndFairnessConfigConsumerHookName) + } + + // Add PostStartHooks for maintaining the watermarks for the Priority-and-Fairness and the Max-in-Flight filters. + if c.FlowControl != nil { + const priorityAndFairnessFilterHookName = "priority-and-fairness-filter" + if !s.isPostStartHookRegistered(priorityAndFairnessFilterHookName) { + err := s.AddPostStartHook(priorityAndFairnessFilterHookName, func(context PostStartHookContext) error { + genericfilters.StartPriorityAndFairnessWatermarkMaintenance(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + } + } else { + const maxInFlightFilterHookName = "max-in-flight-filter" + if !s.isPostStartHookRegistered(maxInFlightFilterHookName) { + err := s.AddPostStartHook(maxInFlightFilterHookName, func(context PostStartHookContext) error { + genericfilters.StartMaxInFlightWatermarkMaintenance(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + } + } + + for _, delegateCheck := range delegationTarget.HealthzChecks() { + skip := false + for _, existingCheck := range c.HealthzChecks { + if existingCheck.Name() == delegateCheck.Name() { + skip = true + break + } + } + if skip { + continue + } + s.AddHealthChecks(delegateCheck) + } + + s.listedPathProvider = routes.ListedPathProviders{s.listedPathProvider, delegationTarget} + + installAPI(s, c.Config) + + // use the UnprotectedHandler from the delegation target to ensure that we don't attempt to double authenticator, authorize, + // or some other part of the filter chain in delegation cases. 
+ if delegationTarget.UnprotectedHandler() == nil && c.EnableIndex { + s.Handler.NonGoRestfulMux.NotFoundHandler(routes.IndexLister{ + StatusCode: http.StatusNotFound, + PathProvider: s.listedPathProvider, + }) + } + + return s, nil +} + +func BuildHandlerChainWithStorageVersionPrecondition(apiHandler http.Handler, c *Config) http.Handler { + // WithStorageVersionPrecondition needs the WithRequestInfo to run first + handler := genericapifilters.WithStorageVersionPrecondition(apiHandler, c.StorageVersionManager, c.Serializer) + return DefaultBuildHandlerChain(handler, c) +} + +func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { + handler := filterlatency.TrackCompleted(apiHandler) + handler = genericapifilters.WithAuthorization(handler, c.Authorization.Authorizer, c.Serializer) + handler = filterlatency.TrackStarted(handler, "authorization") + + if c.FlowControl != nil { + handler = filterlatency.TrackCompleted(handler) + handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl) + handler = filterlatency.TrackStarted(handler, "priorityandfairness") + } else { + handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) + } + + handler = filterlatency.TrackCompleted(handler) + handler = genericapifilters.WithImpersonation(handler, c.Authorization.Authorizer, c.Serializer) + handler = filterlatency.TrackStarted(handler, "impersonation") + + handler = filterlatency.TrackCompleted(handler) + handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc) + handler = filterlatency.TrackStarted(handler, "audit") + + failedHandler := genericapifilters.Unauthorized(c.Serializer) + failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.AuditBackend, c.AuditPolicyChecker) + + failedHandler = filterlatency.TrackCompleted(failedHandler) + handler = filterlatency.TrackCompleted(handler) + handler = genericapifilters.WithAuthentication(handler, c.Authentication.Authenticator, failedHandler, c.Authentication.APIAudiences) + handler = filterlatency.TrackStarted(handler, "authentication") + + handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") + handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc, c.RequestTimeout) + handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup) + handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver) + if c.SecureServing != nil && !c.SecureServing.DisableHTTP2 && c.GoawayChance > 0 { + handler = genericfilters.WithProbabilisticGoaway(handler, c.GoawayChance) + } + handler = genericapifilters.WithAuditAnnotations(handler, c.AuditBackend, c.AuditPolicyChecker) + handler = genericapifilters.WithWarningRecorder(handler) + handler = genericapifilters.WithCacheControl(handler) + handler = genericapifilters.WithRequestReceivedTimestamp(handler) + handler = genericfilters.WithPanicRecovery(handler, c.RequestInfoResolver) + return handler +} + +func installAPI(s *GenericAPIServer, c *Config) { + if c.EnableIndex { + routes.Index{}.Install(s.listedPathProvider, s.Handler.NonGoRestfulMux) + } + if c.EnableProfiling { + routes.Profiling{}.Install(s.Handler.NonGoRestfulMux) + if c.EnableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + // so far, only logging related endpoints are considered valid to add for these debug flags. 
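+ // The route installed below exposes /debug/flags/v so the klog verbosity level
+ // can be adjusted at runtime with an HTTP PUT.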
+ routes.DebugFlags{}.Install(s.Handler.NonGoRestfulMux, "v", routes.StringFlagPutHandler(logs.GlogSetter)) + } + if c.EnableMetrics { + if c.EnableProfiling { + routes.MetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) + } else { + routes.DefaultMetrics{}.Install(s.Handler.NonGoRestfulMux) + } + } + + routes.Version{Version: c.Version}.Install(s.Handler.GoRestfulContainer) + + if c.EnableDiscovery { + s.Handler.GoRestfulContainer.Add(s.DiscoveryGroupManager.WebService()) + } + if c.FlowControl != nil && feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { + c.FlowControl.Install(s.Handler.NonGoRestfulMux) + } +} + +func NewRequestInfoResolver(c *Config) *apirequest.RequestInfoFactory { + apiPrefixes := sets.NewString(strings.Trim(APIGroupPrefix, "/")) // all possible API prefixes + legacyAPIPrefixes := sets.String{} // APIPrefixes that won't have groups (legacy) + for legacyAPIPrefix := range c.LegacyAPIGroupPrefixes { + apiPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/")) + legacyAPIPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/")) + } + + return &apirequest.RequestInfoFactory{ + APIPrefixes: apiPrefixes, + GrouplessAPIPrefixes: legacyAPIPrefixes, + } +} + +func (s *SecureServingInfo) HostPort() (string, int, error) { + if s == nil || s.Listener == nil { + return "", 0, fmt.Errorf("no listener found") + } + addr := s.Listener.Addr().String() + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return "", 0, fmt.Errorf("failed to get port from listener address %q: %v", addr, err) + } + port, err := utilsnet.ParsePort(portStr, true) + if err != nil { + return "", 0, fmt.Errorf("invalid non-numeric port %q", portStr) + } + return host, port, nil +} + +// AuthorizeClientBearerToken wraps the authenticator and authorizer in loopback authentication logic +// if the loopback client config is specified AND it has a bearer token. Note that if either authn or +// authz is nil, this function won't add a token authenticator or authorizer. +func AuthorizeClientBearerToken(loopback *restclient.Config, authn *AuthenticationInfo, authz *AuthorizationInfo) { + if loopback == nil || len(loopback.BearerToken) == 0 { + return + } + if authn == nil || authz == nil { + // prevent nil pointer panic + return + } + if authn.Authenticator == nil || authz.Authorizer == nil { + // authenticator or authorizer might be nil if we want to bypass authz/authn + // and we also do nothing in this case. + return + } + + privilegedLoopbackToken := loopback.BearerToken + var uid = uuid.New().String() + tokens := make(map[string]*user.DefaultInfo) + tokens[privilegedLoopbackToken] = &user.DefaultInfo{ + Name: user.APIServerUser, + UID: uid, + Groups: []string{user.SystemPrivilegedGroup}, + } + + tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens) + authn.Authenticator = authenticatorunion.New(tokenAuthenticator, authn.Authenticator) + + tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) + authz.Authorizer = authorizerunion.New(tokenAuthorizer, authz.Authorizer) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go b/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go new file mode 100644 index 0000000000..f2c2de9b32 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "net" + + restclient "k8s.io/client-go/rest" + netutils "k8s.io/utils/net" +) + +// LoopbackClientServerNameOverride is passed to the apiserver from the loopback client in order to +// select the loopback certificate via SNI if TLS is used. +const LoopbackClientServerNameOverride = "apiserver-loopback-client" + +func (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config, error) { + if s == nil || (s.Cert == nil && len(s.SNICerts) == 0) { + return nil, nil + } + + host, port, err := LoopbackHostPort(s.Listener.Addr().String()) + if err != nil { + return nil, err + } + + return &restclient.Config{ + // Do not limit loopback client QPS. + QPS: -1, + Host: "https://" + net.JoinHostPort(host, port), + // override the ServerName to select our loopback certificate via SNI. This name is also + // used by the client to compare the returns server certificate against. + TLSClientConfig: restclient.TLSClientConfig{ + CAData: caCert, + }, + }, nil +} + +func (s *SecureServingInfo) NewLoopbackClientConfig(token string, loopbackCert []byte) (*restclient.Config, error) { + c, err := s.NewClientConfig(loopbackCert) + if err != nil || c == nil { + return c, err + } + + c.BearerToken = token + c.TLSClientConfig.ServerName = LoopbackClientServerNameOverride + + return c, nil +} + +// LoopbackHostPort returns the host and port loopback REST clients should use +// to contact the server. +func LoopbackHostPort(bindAddress string) (string, string, error) { + host, port, err := net.SplitHostPort(bindAddress) + if err != nil { + // should never happen + return "", "", fmt.Errorf("invalid server bind address: %q", bindAddress) + } + + isIPv6 := netutils.IsIPv6String(host) + + // Value is expected to be an IP or DNS name, not "0.0.0.0". + if host == "0.0.0.0" || host == "::" { + // Get ip of local interface, but fall back to "localhost". + // Note that "localhost" is resolved with the external nameserver first with Go's stdlib. + // So if localhost. resolves, we don't get a 127.0.0.1 as expected. + host = getLoopbackAddress(isIPv6) + } + return host, port, nil +} + +// getLoopbackAddress returns the ip address of local loopback interface. If any error occurs or loopback interface is not found, will fall back to "localhost" +func getLoopbackAddress(wantIPv6 bool) string { + addrs, err := net.InterfaceAddrs() + if err == nil { + for _, address := range addrs { + if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsLoopback() && wantIPv6 == netutils.IsIPv6(ipnet.IP) { + return ipnet.IP.String() + } + } + } + return "localhost" +} diff --git a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go new file mode 100644 index 0000000000..655543a251 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "net" + "net/http" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/rest" +) + +// DeprecatedInsecureServingInfo is the main context object for the insecure http server. +// HTTP does NOT include authentication or authorization. +// You shouldn't be using this. It makes sig-auth sad. +type DeprecatedInsecureServingInfo struct { + // Listener is the secure server network listener. + Listener net.Listener + // optional server name for log messages + Name string +} + +// Serve starts an insecure http server with the given handler. It fails only if +// the initial listen call fails. It does not block. +func (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error { + insecureServer := &http.Server{ + Addr: s.Listener.Addr().String(), + Handler: handler, + MaxHeaderBytes: 1 << 20, + } + + if len(s.Name) > 0 { + klog.Infof("Serving %s insecurely on %s", s.Name, s.Listener.Addr()) + } else { + klog.Infof("Serving insecurely on %s", s.Listener.Addr()) + } + _, err := RunServer(insecureServer, s.Listener, shutdownTimeout, stopCh) + // NOTE: we do not handle stoppedCh returned by RunServer for graceful termination here + return err +} + +func (s *DeprecatedInsecureServingInfo) NewLoopbackClientConfig() (*rest.Config, error) { + if s == nil { + return nil, nil + } + + host, port, err := LoopbackHostPort(s.Listener.Addr().String()) + if err != nil { + return nil, err + } + + return &rest.Config{ + Host: "http://" + net.JoinHostPort(host, port), + // Increase QPS limits. The client is currently passed to all admission plugins, + // and those can be throttled in case of higher load on apiserver - see #22340 and #22422 + // for more details. Once #22422 is fixed, we may want to remove it. + QPS: 50, + Burst: 100, + }, nil +} + +// InsecureSuperuser implements authenticator.Request to always return a superuser. +// This is functionally equivalent to skipping authentication and authorization, +// but allows apiserver code to stop special-casing a nil user to skip authorization checks. +type InsecureSuperuser struct{} + +func (InsecureSuperuser) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + auds, _ := authenticator.AudiencesFrom(req.Context()) + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: "system:unsecured", + Groups: []string{user.SystemPrivilegedGroup, user.AllAuthenticated}, + }, + Audiences: auds, + }, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/doc.go b/vendor/k8s.io/apiserver/pkg/server/doc.go new file mode 100644 index 0000000000..bc671eae84 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package server contains the plumbing to create kubernetes-like API server command. +package server // import "k8s.io/apiserver/pkg/server" diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go new file mode 100644 index 0000000000..114002b1a0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" +) + +// CertKeyContentProvider provides a certificate and matching private key +type CertKeyContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCertKeyContent provides cert and key byte content + CurrentCertKeyContent() ([]byte, []byte) +} + +// SNICertKeyContentProvider provides a certificate and matching private key as well as optional explicit names +type SNICertKeyContentProvider interface { + CertKeyContentProvider + // SNINames provides names used for SNI. May return nil. + SNINames() []string +} + +// certKeyContent holds the content for the cert and key +type certKeyContent struct { + cert []byte + key []byte +} + +func (c *certKeyContent) Equal(rhs *certKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + return bytes.Equal(c.key, rhs.key) && bytes.Equal(c.cert, rhs.cert) +} + +// sniCertKeyContent holds the content for the cert and key as well as any explicit names +type sniCertKeyContent struct { + certKeyContent + sniNames []string +} + +func (c *sniCertKeyContent) Equal(rhs *sniCertKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + if len(c.sniNames) != len(rhs.sniNames) { + return false + } + + for i := range c.sniNames { + if c.sniNames[i] != rhs.sniNames[i] { + return false + } + } + + return c.certKeyContent.Equal(&rhs.certKeyContent) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go new file mode 100644 index 0000000000..4348fa3878 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" +) + +// CAContentProvider provides ca bundle byte content +type CAContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCABundleContent provides ca bundle byte content. Errors can be contained to the controllers initializing + // the value. By the time you get here, you should always be returning a value that won't fail. + CurrentCABundleContent() []byte + // VerifyOptions provides VerifyOptions for authenticators + VerifyOptions() (x509.VerifyOptions, bool) +} + +// dynamicCertificateContent holds the content that overrides the baseTLSConfig +type dynamicCertificateContent struct { + // clientCA holds the content for the clientCA bundle + clientCA caBundleContent + servingCert certKeyContent + sniCerts []sniCertKeyContent +} + +// caBundleContent holds the content for the clientCA bundle. Wrapping the bytes makes the Equals work nicely with the +// method receiver. +type caBundleContent struct { + caBundle []byte +} + +func (c *dynamicCertificateContent) Equal(rhs *dynamicCertificateContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + if !c.clientCA.Equal(&rhs.clientCA) { + return false + } + + if !c.servingCert.Equal(&rhs.servingCert) { + return false + } + + if len(c.sniCerts) != len(rhs.sniCerts) { + return false + } + + for i := range c.sniCerts { + if !c.sniCerts[i].Equal(&rhs.sniCerts[i]) { + return false + } + } + + return true +} + +func (c *caBundleContent) Equal(rhs *caBundleContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + return bytes.Equal(c.caBundle, rhs.caBundle) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go new file mode 100644 index 0000000000..ec0fc5096a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go @@ -0,0 +1,274 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" + "fmt" + "sync/atomic" + "time" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// ConfigMapCAController provies a CAContentProvider that can dynamically react to configmap changes +// It also fulfills the authenticator interface to provide verifyoptions +type ConfigMapCAController struct { + name string + + configmapLister corev1listers.ConfigMapLister + configmapNamespace string + configmapName string + configmapKey string + // configMapInformer is tracked so that we can start these on Run + configMapInformer cache.SharedIndexInformer + + // caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the file + caBundle atomic.Value + + listeners []Listener + + queue workqueue.RateLimitingInterface + // preRunCaches are the caches to sync before starting the work of this control loop + preRunCaches []cache.InformerSynced +} + +var _ Notifier = &ConfigMapCAController{} +var _ CAContentProvider = &ConfigMapCAController{} +var _ ControllerRunner = &ConfigMapCAController{} + +// NewDynamicCAFromConfigMapController returns a CAContentProvider based on a configmap that automatically reloads content. +// It is near-realtime via an informer. +func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, kubeClient kubernetes.Interface) (*ConfigMapCAController, error) { + if len(purpose) == 0 { + return nil, fmt.Errorf("missing purpose for ca bundle") + } + if len(namespace) == 0 { + return nil, fmt.Errorf("missing namespace for ca bundle") + } + if len(name) == 0 { + return nil, fmt.Errorf("missing name for ca bundle") + } + if len(key) == 0 { + return nil, fmt.Errorf("missing key for ca bundle") + } + caContentName := fmt.Sprintf("%s::%s::%s::%s", purpose, namespace, name, key) + + // we construct our own informer because we need such a small subset of the information available. Just one namespace. 
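+ // The field selector below restricts the informer to the single named ConfigMap,
+ // so only that object is watched and cached.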
+ uncastConfigmapInformer := corev1informers.NewFilteredConfigMapInformer(kubeClient, namespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(listOptions *v1.ListOptions) { + listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", name).String() + }) + + configmapLister := corev1listers.NewConfigMapLister(uncastConfigmapInformer.GetIndexer()) + + c := &ConfigMapCAController{ + name: caContentName, + configmapNamespace: namespace, + configmapName: name, + configmapKey: key, + configmapLister: configmapLister, + configMapInformer: uncastConfigmapInformer, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)), + preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced}, + } + + uncastConfigmapInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + if cast, ok := obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + if cast, ok := tombstone.Obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + } + return true // always return true just in case. The checks are fairly cheap + }, + Handler: cache.ResourceEventHandlerFuncs{ + // we have a filter, so any time we're called, we may as well queue. We only ever check one configmap + // so we don't have to be choosy about our key. + AddFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + c.queue.Add(c.keyFn()) + }, + DeleteFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + }, + }) + + return c, nil +} + +func (c *ConfigMapCAController) keyFn() string { + // this format matches DeletionHandlingMetaNamespaceKeyFunc for our single key + return c.configmapNamespace + "/" + c.configmapName +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c *ConfigMapCAController) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadCABundle determines the next set of content for the file. +func (c *ConfigMapCAController) loadCABundle() error { + configMap, err := c.configmapLister.ConfigMaps(c.configmapNamespace).Get(c.configmapName) + if err != nil { + return err + } + caBundle := configMap.Data[c.configmapKey] + if len(caBundle) == 0 { + return fmt.Errorf("missing content for CA bundle %q", c.Name()) + } + + // check to see if we have a change. If the values are the same, do nothing. + if !c.hasCAChanged([]byte(caBundle)) { + return nil + } + + caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), []byte(caBundle)) + if err != nil { + return err + } + c.caBundle.Store(caBundleAndVerifier) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// hasCAChanged returns true if the caBundle is different than the current. +func (c *ConfigMapCAController) hasCAChanged(caBundle []byte) bool { + uncastExisting := c.caBundle.Load() + if uncastExisting == nil { + return true + } + + // check to see if we have a change. If the values are the same, do nothing. 
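+ // A missing or unexpectedly typed stored value is treated as a change so that a
+ // fresh bundle is always stored on the next load.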
+ existing, ok := uncastExisting.(*caBundleAndVerifier) + if !ok { + return true + } + if !bytes.Equal(existing.caBundle, caBundle) { + return true + } + + return false +} + +// RunOnce runs a single sync loop +func (c *ConfigMapCAController) RunOnce() error { + // Ignore the error when running once because when using a dynamically loaded ca file, because we think it's better to have nothing for + // a brief time than completely crash. If crashing is necessary, higher order logic like a healthcheck and cause failures. + _ = c.loadCABundle() + return nil +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *ConfigMapCAController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + // we have a personal informer that is narrowly scoped, start it. + go c.configMapInformer.Run(stopCh) + + // wait for your secondary caches to fill before starting your work + if !cache.WaitForNamedCacheSync(c.name, stopCh, c.preRunCaches...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, stopCh) + + <-stopCh +} + +func (c *ConfigMapCAController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *ConfigMapCAController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadCABundle() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *ConfigMapCAController) Name() string { + return c.name +} + +// CurrentCABundleContent provides ca bundle byte content +func (c *ConfigMapCAController) CurrentCABundleContent() []byte { + uncastObj := c.caBundle.Load() + if uncastObj == nil { + return nil // this can happen if we've been unable load data from the apiserver for some reason + } + + return c.caBundle.Load().(*caBundleAndVerifier).caBundle +} + +// VerifyOptions provides verifyoptions compatible with authenticators +func (c *ConfigMapCAController) VerifyOptions() (x509.VerifyOptions, bool) { + uncastObj := c.caBundle.Load() + if uncastObj == nil { + // This can happen if we've been unable load data from the apiserver for some reason. + // In this case, we should not accept any connections on the basis of this ca bundle. + return x509.VerifyOptions{}, false + } + + return uncastObj.(*caBundleAndVerifier).verifyOptions, true +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go new file mode 100644 index 0000000000..756289a802 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -0,0 +1,255 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" + "fmt" + "io/ioutil" + "sync/atomic" + "time" + + "k8s.io/client-go/util/cert" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// FileRefreshDuration is exposed so that integration tests can crank up the reload speed. +var FileRefreshDuration = 1 * time.Minute + +// Listener is an interface to use to notify interested parties of a change. +type Listener interface { + // Enqueue should be called when an input may have changed + Enqueue() +} + +// Notifier is a way to add listeners +type Notifier interface { + // AddListener is adds a listener to be notified of potential input changes + AddListener(listener Listener) +} + +// ControllerRunner is a generic interface for starting a controller +type ControllerRunner interface { + // RunOnce runs the sync loop a single time. This useful for synchronous priming + RunOnce() error + + // Run should be called a go .Run + Run(workers int, stopCh <-chan struct{}) +} + +// DynamicFileCAContent provies a CAContentProvider that can dynamically react to new file content +// It also fulfills the authenticator interface to provide verifyoptions +type DynamicFileCAContent struct { + name string + + // filename is the name the file to read. + filename string + + // caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the file + caBundle atomic.Value + + listeners []Listener + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +var _ Notifier = &DynamicFileCAContent{} +var _ CAContentProvider = &DynamicFileCAContent{} +var _ ControllerRunner = &DynamicFileCAContent{} + +type caBundleAndVerifier struct { + caBundle []byte + verifyOptions x509.VerifyOptions +} + +// NewDynamicCAContentFromFile returns a CAContentProvider based on a filename that automatically reloads content +func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAContent, error) { + if len(filename) == 0 { + return nil, fmt.Errorf("missing filename for ca bundle") + } + name := fmt.Sprintf("%s::%s", purpose, filename) + + ret := &DynamicFileCAContent{ + name: name, + filename: filename, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + } + if err := ret.loadCABundle(); err != nil { + return nil, err + } + + return ret, nil +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c *DynamicFileCAContent) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadCABundle determines the next set of content for the file. +func (c *DynamicFileCAContent) loadCABundle() error { + caBundle, err := ioutil.ReadFile(c.filename) + if err != nil { + return err + } + if len(caBundle) == 0 { + return fmt.Errorf("missing content for CA bundle %q", c.Name()) + } + + // check to see if we have a change. If the values are the same, do nothing. 
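+ // hasCAChanged compares the bytes just read against the last stored bundle, so
+ // listeners are only enqueued when the file content actually differs.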
+ if !c.hasCAChanged(caBundle) { + return nil + } + + caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), caBundle) + if err != nil { + return err + } + c.caBundle.Store(caBundleAndVerifier) + klog.V(2).Infof("Loaded a new CA Bundle and Verifier for %q", c.Name()) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// hasCAChanged returns true if the caBundle is different than the current. +func (c *DynamicFileCAContent) hasCAChanged(caBundle []byte) bool { + uncastExisting := c.caBundle.Load() + if uncastExisting == nil { + return true + } + + // check to see if we have a change. If the values are the same, do nothing. + existing, ok := uncastExisting.(*caBundleAndVerifier) + if !ok { + return true + } + if !bytes.Equal(existing.caBundle, caBundle) { + return true + } + + return false +} + +// RunOnce runs a single sync loop +func (c *DynamicFileCAContent) RunOnce() error { + return c.loadCABundle() +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *DynamicFileCAContent) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, stopCh) + + // TODO this can be wired to an fsnotifier as well. + + <-stopCh +} + +func (c *DynamicFileCAContent) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicFileCAContent) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadCABundle() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *DynamicFileCAContent) Name() string { + return c.name +} + +// CurrentCABundleContent provides ca bundle byte content +func (c *DynamicFileCAContent) CurrentCABundleContent() (cabundle []byte) { + return c.caBundle.Load().(*caBundleAndVerifier).caBundle +} + +// VerifyOptions provides verifyoptions compatible with authenticators +func (c *DynamicFileCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + uncastObj := c.caBundle.Load() + if uncastObj == nil { + return x509.VerifyOptions{}, false + } + + return uncastObj.(*caBundleAndVerifier).verifyOptions, true +} + +// newVerifyOptions creates a new verification func from a file. It reads the content and then fails. +// It will return a nil function if you pass an empty CA file. 
+func newCABundleAndVerifier(name string, caBundle []byte) (*caBundleAndVerifier, error) { + if len(caBundle) == 0 { + return nil, fmt.Errorf("missing content for CA bundle %q", name) + } + + // Wrap with an x509 verifier + var err error + verifyOptions := defaultVerifyOptions() + verifyOptions.Roots, err = cert.NewPoolFromBytes(caBundle) + if err != nil { + return nil, fmt.Errorf("error loading CA bundle for %q: %v", name, err) + } + + return &caBundleAndVerifier{ + caBundle: caBundle, + verifyOptions: verifyOptions, + }, nil +} + +// defaultVerifyOptions returns VerifyOptions that use the system root certificates, current time, +// and requires certificates to be valid for client auth (x509.ExtKeyUsageClientAuth) +func defaultVerifyOptions() x509.VerifyOptions { + return x509.VerifyOptions{ + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go new file mode 100644 index 0000000000..3b7f34738b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "sync/atomic" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// DynamicCertKeyPairContent provides a CertKeyContentProvider that can dynamically react to new file content +type DynamicCertKeyPairContent struct { + name string + + // certFile is the name of the certificate file to read. + certFile string + // keyFile is the name of the key file to read. 
+ keyFile string + + // servingCert is a certKeyContent that contains the last read, non-zero length content of the key and cert + certKeyPair atomic.Value + + listeners []Listener + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +var _ Notifier = &DynamicCertKeyPairContent{} +var _ CertKeyContentProvider = &DynamicCertKeyPairContent{} +var _ ControllerRunner = &DynamicCertKeyPairContent{} + +// NewDynamicServingContentFromFiles returns a dynamic CertKeyContentProvider based on a cert and key filename +func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*DynamicCertKeyPairContent, error) { + if len(certFile) == 0 || len(keyFile) == 0 { + return nil, fmt.Errorf("missing filename for serving cert") + } + name := fmt.Sprintf("%s::%s::%s", purpose, certFile, keyFile) + + ret := &DynamicCertKeyPairContent{ + name: name, + certFile: certFile, + keyFile: keyFile, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + } + if err := ret.loadCertKeyPair(); err != nil { + return nil, err + } + + return ret, nil +} + +// AddListener adds a listener to be notified when the serving cert content changes. +func (c *DynamicCertKeyPairContent) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadServingCert determines the next set of content for the file. +func (c *DynamicCertKeyPairContent) loadCertKeyPair() error { + cert, err := ioutil.ReadFile(c.certFile) + if err != nil { + return err + } + key, err := ioutil.ReadFile(c.keyFile) + if err != nil { + return err + } + if len(cert) == 0 || len(key) == 0 { + return fmt.Errorf("missing content for serving cert %q", c.Name()) + } + + // Ensure that the key matches the cert and both are valid + _, err = tls.X509KeyPair(cert, key) + if err != nil { + return err + } + + newCertKey := &certKeyContent{ + cert: cert, + key: key, + } + + // check to see if we have a change. If the values are the same, do nothing. + existing, ok := c.certKeyPair.Load().(*certKeyContent) + if ok && existing != nil && existing.Equal(newCertKey) { + return nil + } + + c.certKeyPair.Store(newCertKey) + klog.V(2).Infof("Loaded a new cert/key pair for %q", c.Name()) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// RunOnce runs a single sync loop +func (c *DynamicCertKeyPairContent) RunOnce() error { + return c.loadCertKeyPair() +} + +// Run starts the controller and blocks until stopCh is closed. +func (c *DynamicCertKeyPairContent) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, stopCh) + + // TODO this can be wired to an fsnotifier as well. 
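+ // Until then, file changes are picked up by the FileRefreshDuration poll above
+ // (one minute by default).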
+ + <-stopCh +} + +func (c *DynamicCertKeyPairContent) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicCertKeyPairContent) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadCertKeyPair() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *DynamicCertKeyPairContent) Name() string { + return c.name +} + +// CurrentCertKeyContent provides cert and key byte content +func (c *DynamicCertKeyPairContent) CurrentCertKeyContent() ([]byte, []byte) { + certKeyContent := c.certKeyPair.Load().(*certKeyContent) + return certKeyContent.cert, certKeyContent.key +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go new file mode 100644 index 0000000000..161fa1ca75 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +// DynamicFileSNIContent provides a SNICertKeyContentProvider that can dynamically react to new file content +type DynamicFileSNIContent struct { + *DynamicCertKeyPairContent + sniNames []string +} + +var _ Notifier = &DynamicFileSNIContent{} +var _ SNICertKeyContentProvider = &DynamicFileSNIContent{} +var _ ControllerRunner = &DynamicFileSNIContent{} + +// NewDynamicSNIContentFromFiles returns a dynamic SNICertKeyContentProvider based on a cert and key filename and explicit names +func NewDynamicSNIContentFromFiles(purpose, certFile, keyFile string, sniNames ...string) (*DynamicFileSNIContent, error) { + servingContent, err := NewDynamicServingContentFromFiles(purpose, certFile, keyFile) + if err != nil { + return nil, err + } + + ret := &DynamicFileSNIContent{ + DynamicCertKeyPairContent: servingContent, + sniNames: sniNames, + } + if err := ret.loadCertKeyPair(); err != nil { + return nil, err + } + + return ret, nil +} + +// SNINames returns explicitly set SNI names for the certificate. These are not dynamic. +func (c *DynamicFileSNIContent) SNINames() []string { + return c.sniNames +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go new file mode 100644 index 0000000000..8f55edec4e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog/v2" +) + +// BuildNamedCertificates returns a map of *tls.Certificate by name. It's +// suitable for use in tls.Config#NamedCertificates. Returns an error if any of the certs +// is invalid. Returns nil if len(certs) == 0 +func (c *DynamicServingCertificateController) BuildNamedCertificates(sniCerts []sniCertKeyContent) (map[string]*tls.Certificate, error) { + nameToCertificate := map[string]*tls.Certificate{} + byNameExplicit := map[string]*tls.Certificate{} + + // Iterate backwards so that earlier certs take precedence in the names map + for i := len(sniCerts) - 1; i >= 0; i-- { + cert, err := tls.X509KeyPair(sniCerts[i].cert, sniCerts[i].key) + if err != nil { + return nil, fmt.Errorf("invalid SNI cert keypair [%d/%q]: %v", i, c.sniCerts[i].Name(), err) + } + + // error is not possible given above call to X509KeyPair + x509Cert, _ := x509.ParseCertificate(cert.Certificate[0]) + + names := sniCerts[i].sniNames + for _, name := range names { + byNameExplicit[name] = &cert + } + + klog.V(2).Infof("loaded SNI cert [%d/%q]: %s", i, c.sniCerts[i].Name(), GetHumanCertDetail(x509Cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.sniCerts[i].Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "SNICertificateReload", "loaded SNI cert [%d/%q]: %s with explicit names %v", i, c.sniCerts[i].Name(), GetHumanCertDetail(x509Cert), names) + } + + if len(names) == 0 { + names = getCertificateNames(x509Cert) + for _, name := range names { + nameToCertificate[name] = &cert + } + } + } + + // Explicitly set names must override + for k, v := range byNameExplicit { + nameToCertificate[k] = v + } + + return nameToCertificate, nil +} + +// getCertificateNames returns names for an x509.Certificate. The names are +// suitable for use in tls.Config#NamedCertificates. +func getCertificateNames(cert *x509.Certificate) []string { + var names []string + + cn := cert.Subject.CommonName + cnIsIP := net.ParseIP(cn) != nil + cnIsValidDomain := cn == "*" || len(validation.IsDNS1123Subdomain(strings.TrimPrefix(cn, "*."))) == 0 + // don't use the CN if it is a valid IP because our IP serving detection may unexpectedly use it to terminate the connection. + if !cnIsIP && cnIsValidDomain { + names = append(names, cn) + } + for _, san := range cert.DNSNames { + names = append(names, san) + } + // intentionally all IPs in the cert are ignored as SNI forbids passing IPs + // to select a cert. Before go 1.6 the tls happily passed IPs as SNI values. + + return names +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go new file mode 100644 index 0000000000..c877dfe6c6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go @@ -0,0 +1,114 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" +) + +type staticCAContent struct { + name string + caBundle *caBundleAndVerifier +} + +var _ CAContentProvider = &staticCAContent{} + +// NewStaticCAContent returns a CAContentProvider that always returns the same value +func NewStaticCAContent(name string, caBundle []byte) (CAContentProvider, error) { + caBundleAndVerifier, err := newCABundleAndVerifier(name, caBundle) + if err != nil { + return nil, err + } + + return &staticCAContent{ + name: name, + caBundle: caBundleAndVerifier, + }, nil +} + +// Name is just an identifier +func (c *staticCAContent) Name() string { + return c.name +} + +// CurrentCABundleContent provides ca bundle byte content +func (c *staticCAContent) CurrentCABundleContent() (cabundle []byte) { + return c.caBundle.caBundle +} + +func (c *staticCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + return c.caBundle.verifyOptions, true +} + +type staticCertKeyContent struct { + name string + cert []byte + key []byte +} + +type staticSNICertKeyContent struct { + staticCertKeyContent + sniNames []string +} + +// NewStaticCertKeyContent returns a CertKeyContentProvider that always returns the same value +func NewStaticCertKeyContent(name string, cert, key []byte) (CertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, nil +} + +// NewStaticSNICertKeyContent returns a SNICertKeyContentProvider that always returns the same value +func NewStaticSNICertKeyContent(name string, cert, key []byte, sniNames ...string) (SNICertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticSNICertKeyContent{ + staticCertKeyContent: staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, + sniNames: sniNames, + }, nil +} + +// Name is just an identifier +func (c *staticCertKeyContent) Name() string { + return c.name +} + +// CurrentCertKeyContent provides cert and key content +func (c *staticCertKeyContent) CurrentCertKeyContent() ([]byte, []byte) { + return c.cert, c.key +} + +func (c *staticSNICertKeyContent) SNINames() []string { + return c.sniNames +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go new file mode 100644 index 0000000000..f637f32331 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go @@ -0,0 +1,284 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net" + "sync/atomic" + "time" + + corev1 "k8s.io/api/core/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/events" + "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +const workItemKey = "key" + +// DynamicServingCertificateController dynamically loads certificates and provides a golang tls compatible dynamic GetCertificate func. +type DynamicServingCertificateController struct { + // baseTLSConfig is the static portion of the tlsConfig for serving to clients. It is copied and the copy is mutated + // based on the dynamic cert state. + baseTLSConfig *tls.Config + + // clientCA provides the very latest content of the ca bundle + clientCA CAContentProvider + // servingCert provides the very latest content of the default serving certificate + servingCert CertKeyContentProvider + // sniCerts are a list of CertKeyContentProvider with associated names used for SNI + sniCerts []SNICertKeyContentProvider + + // currentlyServedContent holds the original bytes that we are serving. This is used to decide if we need to set a + // new atomic value. The types used for efficient TLSConfig preclude using the processed value. + currentlyServedContent *dynamicCertificateContent + // currentServingTLSConfig holds a *tls.Config that will be used to serve requests + currentServingTLSConfig atomic.Value + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface + eventRecorder events.EventRecorder +} + +var _ Listener = &DynamicServingCertificateController{} + +// NewDynamicServingCertificateController returns a controller that can be used to keep a TLSConfig up to date. 
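+// The returned controller implements Listener, so it can be registered with its
+// content providers via their AddListener methods to be re-synced on changes.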
+func NewDynamicServingCertificateController( + baseTLSConfig *tls.Config, + clientCA CAContentProvider, + servingCert CertKeyContentProvider, + sniCerts []SNICertKeyContentProvider, + eventRecorder events.EventRecorder, +) *DynamicServingCertificateController { + c := &DynamicServingCertificateController{ + baseTLSConfig: baseTLSConfig, + clientCA: clientCA, + servingCert: servingCert, + sniCerts: sniCerts, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicServingCertificateController"), + eventRecorder: eventRecorder, + } + + return c +} + +// GetConfigForClient is an implementation of tls.Config.GetConfigForClient +func (c *DynamicServingCertificateController) GetConfigForClient(clientHello *tls.ClientHelloInfo) (*tls.Config, error) { + uncastObj := c.currentServingTLSConfig.Load() + if uncastObj == nil { + return nil, errors.New("dynamiccertificates: configuration not ready") + } + tlsConfig, ok := uncastObj.(*tls.Config) + if !ok { + return nil, errors.New("dynamiccertificates: unexpected config type") + } + + tlsConfigCopy := tlsConfig.Clone() + + // if the client set SNI information, just use our "normal" SNI flow + if len(clientHello.ServerName) > 0 { + return tlsConfigCopy, nil + } + + // if the client didn't set SNI, then we need to inspect the requested IP so that we can choose + // a certificate from our list if we specifically handle that IP. This can happen when an IP is specifically mapped by name. + host, _, err := net.SplitHostPort(clientHello.Conn.LocalAddr().String()) + if err != nil { + return tlsConfigCopy, nil + } + + ipCert, ok := tlsConfigCopy.NameToCertificate[host] + if !ok { + return tlsConfigCopy, nil + } + tlsConfigCopy.Certificates = []tls.Certificate{*ipCert} + tlsConfigCopy.NameToCertificate = nil + + return tlsConfigCopy, nil +} + +// newTLSContent determines the next set of content for overriding the baseTLSConfig. +func (c *DynamicServingCertificateController) newTLSContent() (*dynamicCertificateContent, error) { + newContent := &dynamicCertificateContent{} + + if c.clientCA != nil { + currClientCABundle := c.clientCA.CurrentCABundleContent() + // we allow removing all client ca bundles because the server is still secure when this happens. it just means + // that there isn't a hint to clients about which client-cert to used. this happens when there is no client-ca + // yet known for authentication, which can happen in aggregated apiservers and some kube-apiserver deployment modes. + newContent.clientCA = caBundleContent{caBundle: currClientCABundle} + } + + if c.servingCert != nil { + currServingCert, currServingKey := c.servingCert.CurrentCertKeyContent() + if len(currServingCert) == 0 || len(currServingKey) == 0 { + return nil, fmt.Errorf("not loading an empty serving certificate from %q", c.servingCert.Name()) + } + + newContent.servingCert = certKeyContent{cert: currServingCert, key: currServingKey} + } + + for i, sniCert := range c.sniCerts { + currCert, currKey := sniCert.CurrentCertKeyContent() + if len(currCert) == 0 || len(currKey) == 0 { + return nil, fmt.Errorf("not loading an empty SNI certificate from %d/%q", i, sniCert.Name()) + } + + newContent.sniCerts = append(newContent.sniCerts, sniCertKeyContent{certKeyContent: certKeyContent{cert: currCert, key: currKey}, sniNames: sniCert.SNINames()}) + } + + return newContent, nil +} + +// syncCerts gets newTLSContent, if it has changed from the existing, the content is parsed and stored for usage in +// GetConfigForClient. 
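+// It runs on the controller's single worker goroutine, triggered by the periodic
+// timer and by Enqueue calls from other control loops.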
+func (c *DynamicServingCertificateController) syncCerts() error { + newContent, err := c.newTLSContent() + if err != nil { + return err + } + // if the content is the same as what we currently have, we can simply skip it. This works because we are single + // threaded. If you ever make this multi-threaded, add a lock. + if newContent.Equal(c.currentlyServedContent) { + return nil + } + + // make a shallow copy and override the dynamic pieces which have changed. + newTLSConfigCopy := c.baseTLSConfig.Clone() + + // parse new content to add to TLSConfig + if len(newContent.clientCA.caBundle) > 0 { + newClientCAPool := x509.NewCertPool() + newClientCAs, err := cert.ParseCertsPEM(newContent.clientCA.caBundle) + if err != nil { + return fmt.Errorf("unable to load client CA file %q: %v", string(newContent.clientCA.caBundle), err) + } + for i, cert := range newClientCAs { + klog.V(2).Infof("loaded client CA [%d/%q]: %s", i, c.clientCA.Name(), GetHumanCertDetail(cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.clientCA.Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "CACertificateReload", "loaded client CA [%d/%q]: %s", i, c.clientCA.Name(), GetHumanCertDetail(cert)) + } + + newClientCAPool.AddCert(cert) + } + + newTLSConfigCopy.ClientCAs = newClientCAPool + } + + if len(newContent.servingCert.cert) > 0 && len(newContent.servingCert.key) > 0 { + cert, err := tls.X509KeyPair(newContent.servingCert.cert, newContent.servingCert.key) + if err != nil { + return fmt.Errorf("invalid serving cert keypair: %v", err) + } + + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return fmt.Errorf("invalid serving cert: %v", err) + } + + klog.V(2).Infof("loaded serving cert [%q]: %s", c.servingCert.Name(), GetHumanCertDetail(x509Cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.servingCert.Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "ServingCertificateReload", "loaded serving cert [%q]: %s", c.servingCert.Name(), GetHumanCertDetail(x509Cert)) + } + + newTLSConfigCopy.Certificates = []tls.Certificate{cert} + } + + if len(newContent.sniCerts) > 0 { + newTLSConfigCopy.NameToCertificate, err = c.BuildNamedCertificates(newContent.sniCerts) + if err != nil { + return fmt.Errorf("unable to build named certificate map: %v", err) + } + + // append all named certs. Otherwise, the go tls stack will think no SNI processing + // is necessary because there is only one cert anyway. + // Moreover, if servingCert is not set, the first SNI + // cert will become the default cert. That's what we expect anyway. + for _, sniCert := range newTLSConfigCopy.NameToCertificate { + newTLSConfigCopy.Certificates = append(newTLSConfigCopy.Certificates, *sniCert) + } + } + + // store new values of content for serving. + c.currentServingTLSConfig.Store(newTLSConfigCopy) + c.currentlyServedContent = newContent // this is single threaded, so we have no locking issue + + return nil +} + +// RunOnce runs a single sync step to ensure that we have a valid starting configuration. +func (c *DynamicServingCertificateController) RunOnce() error { + return c.syncCerts() +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. 
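A minimal usage sketch with assumed inputs (not part of the vendored file): Run below drives this controller's own sync loop until stopCh is closed, and the usual wiring is to hand GetConfigForClient to the tls.Config of the serving listener. Here myCA and myServingCert stand in for any CAContentProvider / CertKeyContentProvider implementations, no SNI certs are configured, and the event recorder is left nil (syncCerts tolerates a nil recorder).

// Sketch, written as if inside package dynamiccertificates; extra imports: "net", "net/http".
func serveWithDynamicCerts(myCA CAContentProvider, myServingCert CertKeyContentProvider, stopCh <-chan struct{}) error {
	base := &tls.Config{MinVersion: tls.VersionTLS12}
	ctrl := NewDynamicServingCertificateController(base, myCA, myServingCert, nil, nil)

	// Load the initial content synchronously so the first handshake has a config to serve.
	if err := ctrl.RunOnce(); err != nil {
		return err
	}
	go ctrl.Run(1, stopCh)

	ln, err := net.Listen("tcp", ":8443")
	if err != nil {
		return err
	}
	// Every handshake asks the controller for the freshest *tls.Config.
	tlsLn := tls.NewListener(ln, &tls.Config{GetConfigForClient: ctrl.GetConfigForClient})
	return (&http.Server{Handler: http.NotFoundHandler()}).Serve(tlsLn)
}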
+func (c *DynamicServingCertificateController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting DynamicServingCertificateController") + defer klog.Infof("Shutting down DynamicServingCertificateController") + + // synchronously load once. We will trigger again, so ignoring any error is fine + _ = c.RunOnce() + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + go wait.Until(func() { + c.Enqueue() + }, 1*time.Minute, stopCh) + + <-stopCh +} + +func (c *DynamicServingCertificateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicServingCertificateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.syncCerts() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Enqueue a method to allow separate control loops to cause the certificate controller to trigger and read content. +func (c *DynamicServingCertificateController) Enqueue() { + c.queue.Add(workItemKey) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go new file mode 100644 index 0000000000..89e19ea5a3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type unionCAContent []CAContentProvider + +var _ Notifier = &unionCAContent{} +var _ CAContentProvider = &unionCAContent{} +var _ ControllerRunner = &unionCAContent{} + +// NewUnionCAContentProvider returns a CAContentProvider that is a union of other CAContentProviders +func NewUnionCAContentProvider(caContentProviders ...CAContentProvider) CAContentProvider { + return unionCAContent(caContentProviders) +} + +// Name is just an identifier +func (c unionCAContent) Name() string { + names := []string{} + for _, curr := range c { + names = append(names, curr.Name()) + } + return strings.Join(names, ",") +} + +// CurrentCABundleContent provides ca bundle byte content +func (c unionCAContent) CurrentCABundleContent() []byte { + caBundles := [][]byte{} + for _, curr := range c { + if currCABytes := curr.CurrentCABundleContent(); len(currCABytes) > 0 { + caBundles = append(caBundles, []byte(strings.TrimSpace(string(currCABytes)))) + } + } + + return bytes.Join(caBundles, []byte("\n")) +} + +// CurrentCABundleContent provides ca bundle byte content +func (c unionCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + currCABundle := c.CurrentCABundleContent() + if len(currCABundle) == 0 { + return x509.VerifyOptions{}, false + } + + // TODO make more efficient. This isn't actually used in any of our mainline paths. It's called to build the TLSConfig + // TODO on file changes, but the actual authentication runs against the individual items, not the union. + ret, err := newCABundleAndVerifier(c.Name(), c.CurrentCABundleContent()) + if err != nil { + // because we're made up of already vetted values, this indicates some kind of coding error + panic(err) + } + + return ret.verifyOptions, true +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c unionCAContent) AddListener(listener Listener) { + for _, curr := range c { + if notifier, ok := curr.(Notifier); ok { + notifier.AddListener(listener) + } + } +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c unionCAContent) RunOnce() error { + errors := []error{} + for _, curr := range c { + if controller, ok := curr.(ControllerRunner); ok { + if err := controller.RunOnce(); err != nil { + errors = append(errors, err) + } + } + } + + return utilerrors.NewAggregate(errors) +} + +// Run runs the controller +func (c unionCAContent) Run(workers int, stopCh <-chan struct{}) { + for _, curr := range c { + if controller, ok := curr.(ControllerRunner); ok { + go controller.Run(workers, stopCh) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go new file mode 100644 index 0000000000..6906045cd2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/x509" + "fmt" + "strings" + "time" +) + +// GetHumanCertDetail is a convenient method for printing compact details of certificate that helps when debugging +// kube-apiserver usage of certs. +func GetHumanCertDetail(certificate *x509.Certificate) string { + humanName := certificate.Subject.CommonName + signerHumanName := certificate.Issuer.CommonName + if certificate.Subject.CommonName == certificate.Issuer.CommonName { + signerHumanName = "" + } + + usages := []string{} + for _, curr := range certificate.ExtKeyUsage { + if curr == x509.ExtKeyUsageClientAuth { + usages = append(usages, "client") + continue + } + if curr == x509.ExtKeyUsageServerAuth { + usages = append(usages, "serving") + continue + } + + usages = append(usages, fmt.Sprintf("%d", curr)) + } + + validServingNames := []string{} + for _, ip := range certificate.IPAddresses { + validServingNames = append(validServingNames, ip.String()) + } + for _, dnsName := range certificate.DNSNames { + validServingNames = append(validServingNames, dnsName) + } + servingString := "" + if len(validServingNames) > 0 { + servingString = fmt.Sprintf(" validServingFor=[%s]", strings.Join(validServingNames, ",")) + } + + groupString := "" + if len(certificate.Subject.Organization) > 0 { + groupString = fmt.Sprintf(" groups=[%s]", strings.Join(certificate.Subject.Organization, ",")) + } + + return fmt.Sprintf("%q [%s]%s%s issuer=%q (%v to %v (now=%v))", humanName, strings.Join(usages, ","), groupString, servingString, signerHumanName, certificate.NotBefore.UTC(), certificate.NotAfter.UTC(), + time.Now().UTC()) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go new file mode 100644 index 0000000000..b7e378e430 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -0,0 +1,261 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package egressselector + +import ( + "fmt" + "io/ioutil" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/apis/apiserver/install" + "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" + "k8s.io/utils/path" + "sigs.k8s.io/yaml" +) + +var cfgScheme = runtime.NewScheme() + +// validEgressSelectorNames contains the set of valid egress selctor names. +// 'master' is deprecated in favor of 'controlplane' and will be removed in v1.22. +var validEgressSelectorNames = sets.NewString("master", "controlplane", "cluster", "etcd") + +func init() { + install.Install(cfgScheme) +} + +// ReadEgressSelectorConfiguration reads the egress selector configuration at the specified path. +// It returns the loaded egress selector configuration if the input file aligns with the required syntax. 
+// If it does not align with the provided syntax, it returns a default configuration which should function as a no-op. +// It does this by returning a nil configuration, which preserves backward compatibility. +// This works because prior to this there was no egress selector configuration. +// It returns an error if the file did not exist. +func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSelectorConfiguration, error) { + if configFilePath == "" { + return nil, nil + } + // a file was provided, so we just read it. + data, err := ioutil.ReadFile(configFilePath) + if err != nil { + return nil, fmt.Errorf("unable to read egress selector configuration from %q [%v]", configFilePath, err) + } + var decodedConfig v1beta1.EgressSelectorConfiguration + err = yaml.Unmarshal(data, &decodedConfig) + if err != nil { + // we got an error where the decode wasn't related to a missing type + return nil, err + } + if decodedConfig.Kind != "EgressSelectorConfiguration" { + return nil, fmt.Errorf("invalid service configuration object %q", decodedConfig.Kind) + } + internalConfig := &apiserver.EgressSelectorConfiguration{} + if err := cfgScheme.Convert(&decodedConfig, internalConfig, nil); err != nil { + // we got an error where the decode wasn't related to a missing type + return nil, err + } + return internalConfig, nil +} + +// ValidateEgressSelectorConfiguration checks the apiserver.EgressSelectorConfiguration for +// common configuration errors. It will return error for problems such as configuring mtls/cert +// settings for protocol which do not support security. It will also try to catch errors such as +// incorrect file paths. It will return nil if it does not find anything wrong. +func ValidateEgressSelectorConfiguration(config *apiserver.EgressSelectorConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + return allErrs // Treating a nil configuration as valid + } + for _, service := range config.EgressSelections { + fldPath := field.NewPath("service", "connection") + switch service.Connection.ProxyProtocol { + case apiserver.ProtocolDirect: + allErrs = append(allErrs, validateDirectConnection(service.Connection, fldPath)...) + case apiserver.ProtocolHTTPConnect: + allErrs = append(allErrs, validateHTTPConnectTransport(service.Connection.Transport, fldPath)...) + case apiserver.ProtocolGRPC: + allErrs = append(allErrs, validateGRPCTransport(service.Connection.Transport, fldPath)...) 
+ default: + allErrs = append(allErrs, field.NotSupported( + fldPath.Child("protocol"), + service.Connection.ProxyProtocol, + []string{ + string(apiserver.ProtocolDirect), + string(apiserver.ProtocolHTTPConnect), + string(apiserver.ProtocolGRPC), + })) + } + } + + var foundControlPlane, foundMaster bool + for _, service := range config.EgressSelections { + canonicalName := strings.ToLower(service.Name) + + if !validEgressSelectorNames.Has(canonicalName) { + allErrs = append(allErrs, field.NotSupported(field.NewPath("egressSelection", "name"), canonicalName, validEgressSelectorNames.List())) + continue + } + + if canonicalName == "master" { + foundMaster = true + } + + if canonicalName == "controlplane" { + foundControlPlane = true + } + } + + // error if both master and controlplane egress selectors are set + if foundMaster && foundControlPlane { + allErrs = append(allErrs, field.Forbidden(field.NewPath("egressSelection", "name"), "both egressSelection names 'master' and 'controlplane' are specified, only one is allowed")) + } + + return allErrs +} + +func validateHTTPConnectTransport(transport *apiserver.Transport, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if transport == nil { + allErrs = append(allErrs, field.Required( + fldPath.Child("transport"), + "transport must be set for HTTPConnect")) + return allErrs + } + + if transport.TCP != nil && transport.UDS != nil { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("tcp"), + transport.TCP, + "TCP and UDS cannot both be set")) + } else if transport.TCP == nil && transport.UDS == nil { + allErrs = append(allErrs, field.Required( + fldPath.Child("tcp"), + "One of TCP or UDS must be set")) + } else if transport.TCP != nil { + allErrs = append(allErrs, validateTCPConnection(transport.TCP, fldPath)...) + } else if transport.UDS != nil { + allErrs = append(allErrs, validateUDSConnection(transport.UDS, fldPath)...) + } + return allErrs +} + +func validateGRPCTransport(transport *apiserver.Transport, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if transport == nil { + allErrs = append(allErrs, field.Required( + fldPath.Child("transport"), + "transport must be set for GRPC")) + return allErrs + } + + if transport.UDS != nil { + allErrs = append(allErrs, validateUDSConnection(transport.UDS, fldPath)...) 
+ } else { + allErrs = append(allErrs, field.Required( + fldPath.Child("uds"), + "UDS must be set with GRPC")) + } + return allErrs +} + +func validateDirectConnection(connection apiserver.Connection, fldPath *field.Path) field.ErrorList { + if connection.Transport != nil { + return field.ErrorList{field.Invalid( + fldPath.Child("transport"), + "direct", + "Transport config should be absent for direct connect"), + } + } + + return nil +} + +func validateUDSConnection(udsConfig *apiserver.UDSTransport, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if udsConfig.UDSName == "" { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("udsName"), + "nil", + "UDSName should be present for UDS connections")) + } + return allErrs +} + +func validateTCPConnection(tcpConfig *apiserver.TCPTransport, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if strings.HasPrefix(tcpConfig.URL, "http://") { + if tcpConfig.TLSConfig != nil { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("tlsConfig"), + "nil", + "TLSConfig config should not be present when using HTTP")) + } + } else if strings.HasPrefix(tcpConfig.URL, "https://") { + return validateTLSConfig(tcpConfig.TLSConfig, fldPath) + } else { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("url"), + tcpConfig.URL, + "supported connection protocols are http:// and https://")) + } + return allErrs +} + +func validateTLSConfig(tlsConfig *apiserver.TLSConfig, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if tlsConfig == nil { + allErrs = append(allErrs, field.Required( + fldPath.Child("tlsConfig"), + "TLSConfig must be present when using HTTPS")) + return allErrs + } + if tlsConfig.CABundle != "" { + if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.CABundle); !exists || err != nil { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("tlsConfig", "caBundle"), + tlsConfig.CABundle, + "TLS config ca bundle does not exist")) + } + } + if tlsConfig.ClientCert == "" { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("tlsConfig", "clientCert"), + "nil", + "Using TLS requires clientCert")) + } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientCert); !exists || err != nil { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("tlsConfig", "clientCert"), + tlsConfig.ClientCert, + "TLS client cert does not exist")) + } + if tlsConfig.ClientKey == "" { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("tlsConfig", "clientKey"), + "nil", + "Using TLS requires requires clientKey")) + } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientKey); !exists || err != nil { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("tlsConfig", "clientKey"), + tlsConfig.ClientKey, + "TLS client key does not exist")) + } + return allErrs +} diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go new file mode 100644 index 0000000000..a849575b8e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -0,0 +1,373 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package egressselector + +import ( + "bufio" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" + + "google.golang.org/grpc" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/apis/apiserver" + egressmetrics "k8s.io/apiserver/pkg/server/egressselector/metrics" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" + client "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client" +) + +var directDialer utilnet.DialFunc = http.DefaultTransport.(*http.Transport).DialContext + +// EgressSelector is the map of network context type to context dialer, for network egress. +type EgressSelector struct { + egressToDialer map[EgressType]utilnet.DialFunc +} + +// EgressType is an indicator of which egress selection should be used for sending traffic. +// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190226-network-proxy.md#network-context +type EgressType int + +const ( + // ControlPlane is the EgressType for traffic intended to go to the control plane. + ControlPlane EgressType = iota + // Etcd is the EgressType for traffic intended to go to Kubernetes persistence store. + Etcd + // Cluster is the EgressType for traffic intended to go to the system being managed by Kubernetes. + Cluster +) + +// NetworkContext is the struct used by Kubernetes API Server to indicate where it intends traffic to be sent. +type NetworkContext struct { + // EgressSelectionName is the unique name of the + // EgressSelectorConfiguration which determines + // the network we route the traffic to. + EgressSelectionName EgressType +} + +// Lookup is the interface to get the dialer function for the network context. +type Lookup func(networkContext NetworkContext) (utilnet.DialFunc, error) + +// String returns the canonical string representation of the egress type +func (s EgressType) String() string { + switch s { + case ControlPlane: + return "controlplane" + case Etcd: + return "etcd" + case Cluster: + return "cluster" + default: + return "invalid" + } +} + +// AsNetworkContext is a helper function to make it easy to get the basic NetworkContext objects. +func (s EgressType) AsNetworkContext() NetworkContext { + return NetworkContext{EgressSelectionName: s} +} + +func lookupServiceName(name string) (EgressType, error) { + switch strings.ToLower(name) { + // 'master' is deprecated, interpret "master" as controlplane internally until removed in v1.22. 
+ case "master": + klog.Warning("EgressSelection name 'master' is deprecated, use 'controlplane' instead") + return ControlPlane, nil + case "controlplane": + return ControlPlane, nil + case "etcd": + return Etcd, nil + case "cluster": + return Cluster, nil + } + return -1, fmt.Errorf("unrecognized service name %s", name) +} + +func tunnelHTTPConnect(proxyConn net.Conn, proxyAddress, addr string) (net.Conn, error) { + fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, "127.0.0.1") + br := bufio.NewReader(proxyConn) + res, err := http.ReadResponse(br, nil) + if err != nil { + proxyConn.Close() + return nil, fmt.Errorf("reading HTTP response from CONNECT to %s via proxy %s failed: %v", + addr, proxyAddress, err) + } + if res.StatusCode != 200 { + proxyConn.Close() + return nil, fmt.Errorf("proxy error from %s while dialing %s, code %d: %v", + proxyAddress, addr, res.StatusCode, res.Status) + } + + // It's safe to discard the bufio.Reader here and return the + // original TCP conn directly because we only use this for + // TLS, and in TLS the client speaks first, so we know there's + // no unbuffered data. But we can double-check. + if br.Buffered() > 0 { + proxyConn.Close() + return nil, fmt.Errorf("unexpected %d bytes of buffered data from CONNECT proxy %q", + br.Buffered(), proxyAddress) + } + return proxyConn, nil +} + +type proxier interface { + // proxy returns a connection to addr. + proxy(addr string) (net.Conn, error) +} + +var _ proxier = &httpConnectProxier{} + +type httpConnectProxier struct { + conn net.Conn + proxyAddress string +} + +func (t *httpConnectProxier) proxy(addr string) (net.Conn, error) { + return tunnelHTTPConnect(t.conn, t.proxyAddress, addr) +} + +var _ proxier = &grpcProxier{} + +type grpcProxier struct { + tunnel client.Tunnel +} + +func (g *grpcProxier) proxy(addr string) (net.Conn, error) { + return g.tunnel.Dial("tcp", addr) +} + +type proxyServerConnector interface { + // connect establishes connection to the proxy server, and returns a + // proxier based on the connection. 
+ connect() (proxier, error) +} + +type tcpHTTPConnectConnector struct { + proxyAddress string + tlsConfig *tls.Config +} + +func (t *tcpHTTPConnectConnector) connect() (proxier, error) { + conn, err := tls.Dial("tcp", t.proxyAddress, t.tlsConfig) + if err != nil { + return nil, err + } + return &httpConnectProxier{conn: conn, proxyAddress: t.proxyAddress}, nil +} + +type udsHTTPConnectConnector struct { + udsName string +} + +func (u *udsHTTPConnectConnector) connect() (proxier, error) { + conn, err := net.Dial("unix", u.udsName) + if err != nil { + return nil, err + } + return &httpConnectProxier{conn: conn, proxyAddress: u.udsName}, nil +} + +type udsGRPCConnector struct { + udsName string +} + +func (u *udsGRPCConnector) connect() (proxier, error) { + udsName := u.udsName + dialOption := grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) { + c, err := net.Dial("unix", udsName) + if err != nil { + klog.Errorf("failed to create connection to uds name %s, error: %v", udsName, err) + } + return c, err + }) + + tunnel, err := client.CreateSingleUseGrpcTunnel(udsName, dialOption, grpc.WithInsecure()) + if err != nil { + return nil, err + } + return &grpcProxier{tunnel: tunnel}, nil +} + +type dialerCreator struct { + connector proxyServerConnector + direct bool + options metricsOptions +} + +type metricsOptions struct { + transport string + protocol string +} + +func (d *dialerCreator) createDialer() utilnet.DialFunc { + if d.direct { + return directDialer + } + return func(ctx context.Context, network, addr string) (net.Conn, error) { + trace := utiltrace.New(fmt.Sprintf("Proxy via HTTP Connect over %s", d.options.transport), utiltrace.Field{Key: "address", Value: addr}) + defer trace.LogIfLong(500 * time.Millisecond) + start := egressmetrics.Metrics.Clock().Now() + proxier, err := d.connector.connect() + if err != nil { + egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageConnect) + return nil, err + } + conn, err := proxier.proxy(addr) + if err != nil { + egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageProxy) + return nil, err + } + egressmetrics.Metrics.ObserveDialLatency(egressmetrics.Metrics.Clock().Now().Sub(start), d.options.protocol, d.options.transport) + return conn, nil + } +} + +func getTLSConfig(t *apiserver.TLSConfig) (*tls.Config, error) { + clientCert := t.ClientCert + clientKey := t.ClientKey + caCert := t.CABundle + clientCerts, err := tls.LoadX509KeyPair(clientCert, clientKey) + if err != nil { + return nil, fmt.Errorf("failed to read key pair %s & %s, got %v", clientCert, clientKey, err) + } + certPool := x509.NewCertPool() + if caCert != "" { + certBytes, err := ioutil.ReadFile(caCert) + if err != nil { + return nil, fmt.Errorf("failed to read cert file %s, got %v", caCert, err) + } + ok := certPool.AppendCertsFromPEM(certBytes) + if !ok { + return nil, fmt.Errorf("failed to append CA cert to the cert pool") + } + } else { + // Use host's root CA set instead of providing our own + certPool = nil + } + return &tls.Config{ + Certificates: []tls.Certificate{clientCerts}, + RootCAs: certPool, + }, nil +} + +func getProxyAddress(urlString string) (string, error) { + proxyURL, err := url.Parse(urlString) + if err != nil { + return "", fmt.Errorf("invalid proxy server url %q: %v", urlString, err) + } + return proxyURL.Host, nil +} + +func connectionToDialerCreator(c apiserver.Connection) (*dialerCreator, error) { + switch c.ProxyProtocol { + + case 
apiserver.ProtocolHTTPConnect: + if c.Transport.UDS != nil { + return &dialerCreator{ + connector: &udsHTTPConnectConnector{ + udsName: c.Transport.UDS.UDSName, + }, + options: metricsOptions{ + transport: egressmetrics.TransportUDS, + protocol: egressmetrics.ProtocolHTTPConnect, + }, + }, nil + } else if c.Transport.TCP != nil { + tlsConfig, err := getTLSConfig(c.Transport.TCP.TLSConfig) + if err != nil { + return nil, err + } + proxyAddress, err := getProxyAddress(c.Transport.TCP.URL) + if err != nil { + return nil, err + } + return &dialerCreator{ + connector: &tcpHTTPConnectConnector{ + tlsConfig: tlsConfig, + proxyAddress: proxyAddress, + }, + options: metricsOptions{ + transport: egressmetrics.TransportTCP, + protocol: egressmetrics.ProtocolHTTPConnect, + }, + }, nil + } else { + return nil, fmt.Errorf("Either a TCP or UDS transport must be specified") + } + case apiserver.ProtocolGRPC: + if c.Transport.UDS != nil { + return &dialerCreator{ + connector: &udsGRPCConnector{ + udsName: c.Transport.UDS.UDSName, + }, + options: metricsOptions{ + transport: egressmetrics.TransportUDS, + protocol: egressmetrics.ProtocolGRPC, + }, + }, nil + } + return nil, fmt.Errorf("UDS transport must be specified for GRPC") + case apiserver.ProtocolDirect: + return &dialerCreator{direct: true}, nil + default: + return nil, fmt.Errorf("unrecognized service connection protocol %q", c.ProxyProtocol) + } + +} + +// NewEgressSelector configures lookup mechanism for Lookup. +// It does so based on a EgressSelectorConfiguration which was read at startup. +func NewEgressSelector(config *apiserver.EgressSelectorConfiguration) (*EgressSelector, error) { + if config == nil || config.EgressSelections == nil { + // No Connection Services configured, leaving the serviceMap empty, will return default dialer. + return nil, nil + } + cs := &EgressSelector{ + egressToDialer: make(map[EgressType]utilnet.DialFunc), + } + for _, service := range config.EgressSelections { + name, err := lookupServiceName(service.Name) + if err != nil { + return nil, err + } + dialerCreator, err := connectionToDialerCreator(service.Connection) + if err != nil { + return nil, fmt.Errorf("failed to create dialer for egressSelection %q: %v", name, err) + } + cs.egressToDialer[name] = dialerCreator.createDialer() + } + return cs, nil +} + +// Lookup gets the dialer function for the network context. +// This is configured for the Kubernetes API Server at startup. +func (cs *EgressSelector) Lookup(networkContext NetworkContext) (utilnet.DialFunc, error) { + if cs.egressToDialer == nil { + // The round trip wrapper will over-ride the dialContext method appropriately + return nil, nil + } + + return cs.egressToDialer[networkContext.EgressSelectionName], nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go new file mode 100644 index 0000000000..04ad61c4fd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go @@ -0,0 +1,114 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "egress_dialer" + + // ProtocolHTTPConnect means that the proxy protocol is http-connect. + ProtocolHTTPConnect = "http_connect" + // ProtocolGRPC means that the proxy protocol is the GRPC protocol. + ProtocolGRPC = "grpc" + // TransportTCP means that the transport is TCP. + TransportTCP = "tcp" + // TransportUDS means that the transport is UDS. + TransportUDS = "uds" + // StageConnect indicates that the dial failed at establishing connection to the proxy server. + StageConnect = "connect" + // StageProxy indicates that the dial failed at requesting the proxy server to proxy. + StageProxy = "proxy" +) + +var ( + // Use buckets ranging from 5 ms to 12.5 seconds. + latencyBuckets = []float64{0.005, 0.025, 0.1, 0.5, 2.5, 12.5} + + // Metrics provides access to all dial metrics. + Metrics = newDialMetrics() +) + +// DialMetrics instruments dials to proxy server with prometheus metrics. +type DialMetrics struct { + clock clock.Clock + latencies *metrics.HistogramVec + failures *metrics.CounterVec +} + +// newDialMetrics create a new DialMetrics, configured with default metric names. +func newDialMetrics() *DialMetrics { + latencies := metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dial_duration_seconds", + Help: "Dial latency histogram in seconds, labeled by the protocol (http-connect or grpc), transport (tcp or uds)", + Buckets: latencyBuckets, + StabilityLevel: metrics.ALPHA, + }, + []string{"protocol", "transport"}, + ) + + failures := metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dial_failure_count", + Help: "Dial failure count, labeled by the protocol (http-connect or grpc), transport (tcp or uds), and stage (connect or proxy). The stage indicates at which stage the dial failed", + StabilityLevel: metrics.ALPHA, + }, + []string{"protocol", "transport", "stage"}, + ) + + legacyregistry.MustRegister(latencies) + legacyregistry.MustRegister(failures) + return &DialMetrics{latencies: latencies, failures: failures, clock: clock.RealClock{}} +} + +// Clock returns the clock. +func (m *DialMetrics) Clock() clock.Clock { + return m.clock +} + +// SetClock sets the clock. +func (m *DialMetrics) SetClock(c clock.Clock) { + m.clock = c +} + +// Reset resets the metrics. +func (m *DialMetrics) Reset() { + m.latencies.Reset() + m.failures.Reset() +} + +// ObserveDialLatency records the latency of a dial, labeled by protocol, transport. +func (m *DialMetrics) ObserveDialLatency(elapsed time.Duration, protocol, transport string) { + m.latencies.WithLabelValues(protocol, transport).Observe(elapsed.Seconds()) +} + +// ObserveDialFailure records a failed dial, labeled by protocol, transport, and the stage the dial failed at. 
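Because the package exposes SetClock and Reset, the observations can be exercised deterministically; a rough test-style sketch under assumed imports ("testing", "time", "k8s.io/apimachinery/pkg/util/clock"), written as if inside this metrics package:

// Sketch only: drive the histogram and counter with a fake clock.
func TestDialMetricsSketch(t *testing.T) {
	fake := clock.NewFakeClock(time.Now())
	Metrics.SetClock(fake)
	defer Metrics.Reset()

	start := Metrics.Clock().Now()
	fake.Step(30 * time.Millisecond) // pretend the dial took 30ms
	Metrics.ObserveDialLatency(Metrics.Clock().Now().Sub(start), ProtocolGRPC, TransportUDS)
	Metrics.ObserveDialFailure(ProtocolHTTPConnect, TransportTCP, StageConnect)
}

Following the namespace and subsystem values above, the registered series come out as apiserver_egress_dialer_dial_duration_seconds and apiserver_egress_dialer_dial_failure_count.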
+func (m *DialMetrics) ObserveDialFailure(protocol, transport, stage string) { + m.failures.WithLabelValues(protocol, transport, stage).Inc() +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/OWNERS b/vendor/k8s.io/apiserver/pkg/server/filters/OWNERS new file mode 100644 index 0000000000..c5f73991a3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sttts +- dims diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/content_type.go b/vendor/k8s.io/apiserver/pkg/server/filters/content_type.go new file mode 100644 index 0000000000..65c73fcdc4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/content_type.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import "net/http" + +// WithContentType sets both the Content-Type and the X-Content-Type-Options (nosniff) header +func WithContentType(handler http.Handler, contentType string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", contentType) + w.Header().Set("X-Content-Type-Options", "nosniff") + handler.ServeHTTP(w, r) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/cors.go b/vendor/k8s.io/apiserver/pkg/server/filters/cors.go new file mode 100644 index 0000000000..67df760988 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/cors.go @@ -0,0 +1,98 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + "regexp" + "strings" + + "k8s.io/klog/v2" +) + +// TODO: use restful.CrossOriginResourceSharing +// See github.com/emicklei/go-restful/blob/master/examples/restful-CORS-filter.go, and +// github.com/emicklei/go-restful/blob/master/examples/restful-basic-authentication.go +// Or, for a more detailed implementation use https://github.com/martini-contrib/cors +// or implement CORS at your proxy layer. + +// WithCORS is a simple CORS implementation that wraps an http Handler. +// Pass nil for allowedMethods and allowedHeaders to use the defaults. If allowedOriginPatterns +// is empty or nil, no CORS support is installed. 
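For illustration, a hedged wrapping of an assumed apiHandler: the origin patterns are regular expressions matched against the Origin header, and passing nil for the method/header/expose lists keeps the defaults assigned inside the handler below.

// Sketch only; the origin pattern is an assumption for the example.
func withExampleCORS(apiHandler http.Handler) http.Handler {
	return WithCORS(
		apiHandler,
		[]string{`^https://[^.]+\.example\.com$`}, // invalid patterns make allowedOriginRegexps call klog.Fatalf
		nil,    // allowedMethods: fall back to the built-in defaults
		nil,    // allowedHeaders: fall back to the built-in defaults
		nil,    // exposedHeaders: defaults to Date
		"true", // value for Access-Control-Allow-Credentials
	)
}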
+func WithCORS(handler http.Handler, allowedOriginPatterns []string, allowedMethods []string, allowedHeaders []string, exposedHeaders []string, allowCredentials string) http.Handler { + if len(allowedOriginPatterns) == 0 { + return handler + } + allowedOriginPatternsREs := allowedOriginRegexps(allowedOriginPatterns) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + origin := req.Header.Get("Origin") + if origin != "" { + allowed := false + for _, re := range allowedOriginPatternsREs { + if allowed = re.MatchString(origin); allowed { + break + } + } + if allowed { + w.Header().Set("Access-Control-Allow-Origin", origin) + // Set defaults for methods and headers if nothing was passed + if allowedMethods == nil { + allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE", "PATCH"} + } + if allowedHeaders == nil { + allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"} + } + if exposedHeaders == nil { + exposedHeaders = []string{"Date"} + } + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ", ")) + w.Header().Set("Access-Control-Allow-Headers", strings.Join(allowedHeaders, ", ")) + w.Header().Set("Access-Control-Expose-Headers", strings.Join(exposedHeaders, ", ")) + w.Header().Set("Access-Control-Allow-Credentials", allowCredentials) + + // Stop here if its a preflight OPTIONS request + if req.Method == "OPTIONS" { + w.WriteHeader(http.StatusNoContent) + return + } + } + } + // Dispatch to the next handler + handler.ServeHTTP(w, req) + }) +} + +func allowedOriginRegexps(allowedOrigins []string) []*regexp.Regexp { + res, err := compileRegexps(allowedOrigins) + if err != nil { + klog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(allowedOrigins, ","), err) + } + return res +} + +// Takes a list of strings and compiles them into a list of regular expressions +func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return []*regexp.Regexp{}, err + } + regexps = append(regexps, r) + } + return regexps, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/doc.go b/vendor/k8s.io/apiserver/pkg/server/filters/doc.go new file mode 100644 index 0000000000..a90cc3b496 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package filters contains all the http handler chain filters which +// are not api related. 
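These filters compose by plain wrapping, so a server assembles its chain by applying them in order; a small sketch with an assumed delegate handler (the wrapper applied last is the first to see each request):

// Sketch only: a two-filter chain built from the handlers shown above.
func buildExampleChain(delegate http.Handler) http.Handler {
	h := WithContentType(delegate, "application/json") // sets Content-Type and nosniff on responses
	h = WithCORS(h, []string{`^https://dashboard\.example\.com$`}, nil, nil, nil, "true")
	return h
}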
+package filters // import "k8s.io/apiserver/pkg/server/filters" diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/goaway.go b/vendor/k8s.io/apiserver/pkg/server/filters/goaway.go new file mode 100644 index 0000000000..065bd22bbc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/goaway.go @@ -0,0 +1,84 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "math/rand" + "net/http" + "sync" +) + +// GoawayDecider decides if server should send a GOAWAY +type GoawayDecider interface { + Goaway(r *http.Request) bool +} + +var ( + // randPool used to get a rand.Rand and generate a random number thread-safely, + // which improve the performance of using rand.Rand with a locker + randPool = &sync.Pool{ + New: func() interface{} { + return rand.New(rand.NewSource(rand.Int63())) + }, + } +) + +// WithProbabilisticGoaway returns an http.Handler that send GOAWAY probabilistically +// according to the given chance for HTTP2 requests. After client receive GOAWAY, +// the in-flight long-running requests will not be influenced, and the new requests +// will use a new TCP connection to re-balancing to another server behind the load balance. +func WithProbabilisticGoaway(inner http.Handler, chance float64) http.Handler { + return &goaway{ + handler: inner, + decider: &probabilisticGoawayDecider{ + chance: chance, + next: func() float64 { + rnd := randPool.Get().(*rand.Rand) + ret := rnd.Float64() + randPool.Put(rnd) + return ret + }, + }, + } +} + +// goaway send a GOAWAY to client according to decider for HTTP2 requests +type goaway struct { + handler http.Handler + decider GoawayDecider +} + +// ServeHTTP implement HTTP handler +func (h *goaway) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Proto == "HTTP/2.0" && h.decider.Goaway(r) { + // Send a GOAWAY and tear down the TCP connection when idle. + w.Header().Set("Connection", "close") + } + + h.handler.ServeHTTP(w, r) +} + +// probabilisticGoawayDecider send GOAWAY probabilistically according to chance +type probabilisticGoawayDecider struct { + chance float64 + next func() float64 +} + +// Goaway implement GoawayDecider +func (p *probabilisticGoawayDecider) Goaway(r *http.Request) bool { + return p.next() < p.chance +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go b/vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go new file mode 100644 index 0000000000..1b58f16386 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// BasicLongRunningRequestCheck returns true if the given request has one of the specified verbs or one of the specified subresources, or is a profiler request. +func BasicLongRunningRequestCheck(longRunningVerbs, longRunningSubresources sets.String) apirequest.LongRunningRequestCheck { + return func(r *http.Request, requestInfo *apirequest.RequestInfo) bool { + if longRunningVerbs.Has(requestInfo.Verb) { + return true + } + if requestInfo.IsResourceRequest && longRunningSubresources.Has(requestInfo.Subresource) { + return true + } + if !requestInfo.IsResourceRequest && strings.HasPrefix(requestInfo.Path, "/debug/pprof/") { + return true + } + return false + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go new file mode 100644 index 0000000000..e873351c70 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -0,0 +1,220 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + + "k8s.io/klog/v2" +) + +const ( + // Constant for the retry-after interval on rate limiting. + // TODO: maybe make this dynamic? or user-adjustable? + retryAfter = "1" + + // How often inflight usage metric should be updated. Because + // the metrics tracks maximal value over period making this + // longer will increase the metric value. + inflightUsageMetricUpdatePeriod = time.Second + + // How often to run maintenance on observations to ensure + // that they do not fall too far behind. 
+ observationMaintenancePeriod = 10 * time.Second +) + +var nonMutatingRequestVerbs = sets.NewString("get", "list", "watch") + +func handleError(w http.ResponseWriter, r *http.Request, err error) { + errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI) + http.Error(w, errorMsg, http.StatusInternalServerError) + klog.Errorf(err.Error()) +} + +// requestWatermark is used to track maximal numbers of requests in a particular phase of handling +type requestWatermark struct { + phase string + readOnlyObserver, mutatingObserver fcmetrics.TimedObserver + lock sync.Mutex + readOnlyWatermark, mutatingWatermark int +} + +func (w *requestWatermark) recordMutating(mutatingVal int) { + w.mutatingObserver.Set(float64(mutatingVal)) + + w.lock.Lock() + defer w.lock.Unlock() + + if w.mutatingWatermark < mutatingVal { + w.mutatingWatermark = mutatingVal + } +} + +func (w *requestWatermark) recordReadOnly(readOnlyVal int) { + w.readOnlyObserver.Set(float64(readOnlyVal)) + + w.lock.Lock() + defer w.lock.Unlock() + + if w.readOnlyWatermark < readOnlyVal { + w.readOnlyWatermark = readOnlyVal + } +} + +// watermark tracks requests being executed (not waiting in a queue) +var watermark = &requestWatermark{ + phase: metrics.ExecutingPhase, + readOnlyObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{metrics.ReadOnlyKind}).RequestsExecuting, + mutatingObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{metrics.MutatingKind}).RequestsExecuting, +} + +// startWatermarkMaintenance starts the goroutines to observe and maintain the specified watermark. +func startWatermarkMaintenance(watermark *requestWatermark, stopCh <-chan struct{}) { + // Periodically update the inflight usage metric. + go wait.Until(func() { + watermark.lock.Lock() + readOnlyWatermark := watermark.readOnlyWatermark + mutatingWatermark := watermark.mutatingWatermark + watermark.readOnlyWatermark = 0 + watermark.mutatingWatermark = 0 + watermark.lock.Unlock() + + metrics.UpdateInflightRequestMetrics(watermark.phase, readOnlyWatermark, mutatingWatermark) + }, inflightUsageMetricUpdatePeriod, stopCh) + + // Periodically observe the watermarks. This is done to ensure that they do not fall too far behind. When they do + // fall too far behind, then there is a long delay in responding to the next request received while the observer + // catches back up. + go wait.Until(func() { + watermark.readOnlyObserver.Add(0) + watermark.mutatingObserver.Add(0) + }, observationMaintenancePeriod, stopCh) +} + +// WithMaxInFlightLimit limits the number of in-flight requests to buffer size of the passed in channel. 
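As a rough sketch of how the long-running check and this limiter fit together (the verbs, subresources, and limits below are illustrative assumptions, and an earlier filter in the chain must already have attached a RequestInfo to the request context):

// Sketch only; assumed extra import: "k8s.io/apimachinery/pkg/util/sets".
func limitedHandler(inner http.Handler, stopCh <-chan struct{}) http.Handler {
	// Watches and exec-style subresources are treated as long-running, so they
	// are never counted against the in-flight budget.
	longRunning := BasicLongRunningRequestCheck(
		sets.NewString("watch"),
		sets.NewString("exec", "attach", "portforward", "log"),
	)
	StartMaxInFlightWatermarkMaintenance(stopCh) // keep the watermark metrics fresh
	return WithMaxInFlightLimit(inner, 400, 200, longRunning) // 400 read-only, 200 mutating in-flight requests
}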
+func WithMaxInFlightLimit( + handler http.Handler, + nonMutatingLimit int, + mutatingLimit int, + longRunningRequestCheck apirequest.LongRunningRequestCheck, +) http.Handler { + if nonMutatingLimit == 0 && mutatingLimit == 0 { + return handler + } + var nonMutatingChan chan bool + var mutatingChan chan bool + if nonMutatingLimit != 0 { + nonMutatingChan = make(chan bool, nonMutatingLimit) + watermark.readOnlyObserver.SetX1(float64(nonMutatingLimit)) + } + if mutatingLimit != 0 { + mutatingChan = make(chan bool, mutatingLimit) + watermark.mutatingObserver.SetX1(float64(mutatingLimit)) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no RequestInfo found in context, handler chain must be wrong")) + return + } + + // Skip tracking long running events. + if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) { + handler.ServeHTTP(w, r) + return + } + + var c chan bool + isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb) + if isMutatingRequest { + c = mutatingChan + } else { + c = nonMutatingChan + } + + if c == nil { + handler.ServeHTTP(w, r) + } else { + + select { + case c <- true: + // We note the concurrency level both while the + // request is being served and after it is done being + // served, because both states contribute to the + // sampled stats on concurrency. + if isMutatingRequest { + watermark.recordMutating(len(c)) + } else { + watermark.recordReadOnly(len(c)) + } + defer func() { + <-c + if isMutatingRequest { + watermark.recordMutating(len(c)) + } else { + watermark.recordReadOnly(len(c)) + } + }() + handler.ServeHTTP(w, r) + + default: + // at this point we're about to return a 429, BUT not all actors should be rate limited. A system:master is so powerful + // that they should always get an answer. It's a super-admin or a loopback connection. + if currUser, ok := apirequest.UserFrom(ctx); ok { + for _, group := range currUser.GetGroups() { + if group == user.SystemPrivilegedGroup { + handler.ServeHTTP(w, r) + return + } + } + } + // We need to split this data between buckets used for throttling. + if isMutatingRequest { + metrics.DroppedRequests.WithLabelValues(metrics.MutatingKind).Inc() + } else { + metrics.DroppedRequests.WithLabelValues(metrics.ReadOnlyKind).Inc() + } + metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests) + tooManyRequests(r, w) + } + } + }) +} + +// StartMaxInFlightWatermarkMaintenance starts the goroutines to observe and maintain watermarks for max-in-flight +// requests. +func StartMaxInFlightWatermarkMaintenance(stopCh <-chan struct{}) { + startWatermarkMaintenance(watermark, stopCh) +} + +func tooManyRequests(req *http.Request, w http.ResponseWriter) { + // Return a 429 status indicating "Too Many Requests" + w.Header().Set("Retry-After", retryAfter) + http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go new file mode 100644 index 0000000000..e1d7b7793a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -0,0 +1,155 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + apitypes "k8s.io/apimachinery/pkg/types" + epmetrics "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + "k8s.io/klog/v2" +) + +type priorityAndFairnessKeyType int + +const priorityAndFairnessKey priorityAndFairnessKeyType = iota + +// PriorityAndFairnessClassification identifies the results of +// classification for API Priority and Fairness +type PriorityAndFairnessClassification struct { + FlowSchemaName string + FlowSchemaUID apitypes.UID + PriorityLevelName string + PriorityLevelUID apitypes.UID +} + +// GetClassification returns the classification associated with the +// given context, if any, otherwise nil +func GetClassification(ctx context.Context) *PriorityAndFairnessClassification { + return ctx.Value(priorityAndFairnessKey).(*PriorityAndFairnessClassification) +} + +// waitingMark tracks requests waiting rather than being executed +var waitingMark = &requestWatermark{ + phase: epmetrics.WaitingPhase, + readOnlyObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{epmetrics.ReadOnlyKind}).RequestsWaiting, + mutatingObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{epmetrics.MutatingKind}).RequestsWaiting, +} + +var atomicMutatingExecuting, atomicReadOnlyExecuting int32 +var atomicMutatingWaiting, atomicReadOnlyWaiting int32 + +// WithPriorityAndFairness limits the number of in-flight +// requests in a fine-grained way. +func WithPriorityAndFairness( + handler http.Handler, + longRunningRequestCheck apirequest.LongRunningRequestCheck, + fcIfc utilflowcontrol.Interface, +) http.Handler { + if fcIfc == nil { + klog.Warningf("priority and fairness support not found, skipping") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no RequestInfo found in context")) + return + } + user, ok := apirequest.UserFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no User found in context")) + return + } + + // Skip tracking long running requests. 
+ if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) { + klog.V(6).Infof("Serving RequestInfo=%#+v, user.Info=%#+v as longrunning\n", requestInfo, user) + handler.ServeHTTP(w, r) + return + } + + var classification *PriorityAndFairnessClassification + note := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration) { + classification = &PriorityAndFairnessClassification{ + FlowSchemaName: fs.Name, + FlowSchemaUID: fs.UID, + PriorityLevelName: pl.Name, + PriorityLevelUID: pl.UID} + } + + var served bool + isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb) + noteExecutingDelta := func(delta int32) { + if isMutatingRequest { + watermark.recordMutating(int(atomic.AddInt32(&atomicMutatingExecuting, delta))) + } else { + watermark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyExecuting, delta))) + } + } + noteWaitingDelta := func(delta int32) { + if isMutatingRequest { + waitingMark.recordMutating(int(atomic.AddInt32(&atomicMutatingWaiting, delta))) + } else { + waitingMark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyWaiting, delta))) + } + } + execute := func() { + noteExecutingDelta(1) + defer noteExecutingDelta(-1) + served = true + innerCtx := context.WithValue(ctx, priorityAndFairnessKey, classification) + innerReq := r.Clone(innerCtx) + w.Header().Set(flowcontrol.ResponseHeaderMatchedPriorityLevelConfigurationUID, string(classification.PriorityLevelUID)) + w.Header().Set(flowcontrol.ResponseHeaderMatchedFlowSchemaUID, string(classification.FlowSchemaUID)) + handler.ServeHTTP(w, innerReq) + } + digest := utilflowcontrol.RequestDigest{RequestInfo: requestInfo, User: user} + fcIfc.Handle(ctx, digest, note, func(inQueue bool) { + if inQueue { + noteWaitingDelta(1) + } else { + noteWaitingDelta(-1) + } + }, execute) + if !served { + if isMutatingRequest { + epmetrics.DroppedRequests.WithLabelValues(epmetrics.MutatingKind).Inc() + } else { + epmetrics.DroppedRequests.WithLabelValues(epmetrics.ReadOnlyKind).Inc() + } + epmetrics.RecordRequestTermination(r, requestInfo, epmetrics.APIServerComponent, http.StatusTooManyRequests) + tooManyRequests(r, w) + } + + }) +} + +// StartPriorityAndFairnessWatermarkMaintenance starts the goroutines to observe and maintain watermarks for +// priority-and-fairness requests. +func StartPriorityAndFairnessWatermarkMaintenance(stopCh <-chan struct{}) { + startWatermarkMaintenance(watermark, stopCh) + startWatermarkMaintenance(waitingMark, stopCh) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go new file mode 100644 index 0000000000..2405bfd1ff --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -0,0 +1,314 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
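// Illustrative sketch (not part of the upstream file): choosing between the two
// in-flight filters defined above. The helper name, the limits and the flow
// control interface value are assumptions supplied by the caller.
//
//   func withRequestLimiting(h http.Handler, longRunning apirequest.LongRunningRequestCheck, fc utilflowcontrol.Interface, stopCh <-chan struct{}) http.Handler {
//       if fc != nil {
//           // Fine-grained limiting via API Priority and Fairness.
//           StartPriorityAndFairnessWatermarkMaintenance(stopCh)
//           return WithPriorityAndFairness(h, longRunning, fc)
//       }
//       // Coarse per-verb-class limits when flow control is not configured.
//       StartMaxInFlightWatermarkMaintenance(stopCh)
//       return WithMaxInFlightLimit(h, 400, 200, longRunning)
//   }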
+*/ + +package filters + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "runtime" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithTimeoutForNonLongRunningRequests times out non-long-running requests after the time given by timeout. +func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, timeout time.Duration) http.Handler { + if longRunning == nil { + return handler + } + timeoutFunc := func(req *http.Request) (*http.Request, <-chan time.Time, func(), *apierrors.StatusError) { + // TODO unify this with apiserver.MaxInFlightLimit + ctx := req.Context() + + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + // if this happens, the handler chain isn't setup correctly because there is no request info + return req, time.After(timeout), func() {}, apierrors.NewInternalError(fmt.Errorf("no request info found for request during timeout")) + } + + if longRunning(req, requestInfo) { + return req, nil, nil, nil + } + + ctx, cancel := context.WithCancel(ctx) + req = req.WithContext(ctx) + + postTimeoutFn := func() { + cancel() + metrics.RecordRequestTermination(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout) + } + return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0) + } + return WithTimeout(handler, timeoutFunc) +} + +type timeoutFunc = func(*http.Request) (req *http.Request, timeout <-chan time.Time, postTimeoutFunc func(), err *apierrors.StatusError) + +// WithTimeout returns an http.Handler that runs h with a timeout +// determined by timeoutFunc. The new http.Handler calls h.ServeHTTP to handle +// each request, but if a call runs for longer than its time limit, the +// handler responds with a 504 Gateway Timeout error and the message +// provided. (If msg is empty, a suitable default message will be sent.) After +// the handler times out, writes by h to its http.ResponseWriter will return +// http.ErrHandlerTimeout. If timeoutFunc returns a nil timeout channel, no +// timeout will be enforced. recordFn is a function that will be invoked whenever +// a timeout happens. +func WithTimeout(h http.Handler, timeoutFunc timeoutFunc) http.Handler { + return &timeoutHandler{h, timeoutFunc} +} + +type timeoutHandler struct { + handler http.Handler + timeout timeoutFunc +} + +func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + r, after, postTimeoutFn, err := t.timeout(r) + if after == nil { + t.handler.ServeHTTP(w, r) + return + } + + // resultCh is used as both errCh and stopCh + resultCh := make(chan interface{}) + tw := newTimeoutWriter(w) + go func() { + defer func() { + err := recover() + // do not wrap the sentinel ErrAbortHandler panic value + if err != nil && err != http.ErrAbortHandler { + // Same as stdlib http server code. 
Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err = fmt.Sprintf("%v\n%s", err, buf) + } + resultCh <- err + }() + t.handler.ServeHTTP(tw, r) + }() + select { + case err := <-resultCh: + // panic if error occurs; stop otherwise + if err != nil { + panic(err) + } + return + case <-after: + defer func() { + // resultCh needs to have a reader, since the function doing + // the work needs to send to it. This is defer'd to ensure it runs + // ever if the post timeout work itself panics. + go func() { + res := <-resultCh + if res != nil { + switch t := res.(type) { + case error: + utilruntime.HandleError(t) + default: + utilruntime.HandleError(fmt.Errorf("%v", res)) + } + } + }() + }() + + postTimeoutFn() + tw.timeout(err) + } +} + +type timeoutWriter interface { + http.ResponseWriter + timeout(*apierrors.StatusError) +} + +func newTimeoutWriter(w http.ResponseWriter) timeoutWriter { + base := &baseTimeoutWriter{w: w} + + _, notifiable := w.(http.CloseNotifier) + _, hijackable := w.(http.Hijacker) + + switch { + case notifiable && hijackable: + return &closeHijackTimeoutWriter{base} + case notifiable: + return &closeTimeoutWriter{base} + case hijackable: + return &hijackTimeoutWriter{base} + default: + return base + } +} + +type baseTimeoutWriter struct { + w http.ResponseWriter + + mu sync.Mutex + // if the timeout handler has timeout + timedOut bool + // if this timeout writer has wrote header + wroteHeader bool + // if this timeout writer has been hijacked + hijacked bool +} + +func (tw *baseTimeoutWriter) Header() http.Header { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return http.Header{} + } + + return tw.w.Header() +} + +func (tw *baseTimeoutWriter) Write(p []byte) (int, error) { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return 0, http.ErrHandlerTimeout + } + if tw.hijacked { + return 0, http.ErrHijacked + } + + tw.wroteHeader = true + return tw.w.Write(p) +} + +func (tw *baseTimeoutWriter) Flush() { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return + } + + if flusher, ok := tw.w.(http.Flusher); ok { + flusher.Flush() + } +} + +func (tw *baseTimeoutWriter) WriteHeader(code int) { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut || tw.wroteHeader || tw.hijacked { + return + } + + tw.wroteHeader = true + tw.w.WriteHeader(code) +} + +func (tw *baseTimeoutWriter) timeout(err *apierrors.StatusError) { + tw.mu.Lock() + defer tw.mu.Unlock() + + tw.timedOut = true + + // The timeout writer has not been used by the inner handler. + // We can safely timeout the HTTP request by sending by a timeout + // handler + if !tw.wroteHeader && !tw.hijacked { + tw.w.WriteHeader(http.StatusGatewayTimeout) + enc := json.NewEncoder(tw.w) + enc.Encode(&err.ErrStatus) + } else { + // The timeout writer has been used by the inner handler. There is + // no way to timeout the HTTP request at the point. We have to shutdown + // the connection for HTTP1 or reset stream for HTTP2. + // + // Note from the golang's docs: + // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes + // that the effect of the panic was isolated to the active request. + // It recovers the panic, logs a stack trace to the server error log, + // and either closes the network connection or sends an HTTP/2 + // RST_STREAM, depending on the HTTP protocol. 
To abort a handler so + // the client sees an interrupted response but the server doesn't log + // an error, panic with the value ErrAbortHandler. + // + // We are throwing http.ErrAbortHandler deliberately so that a client is notified and to suppress a not helpful stacktrace in the logs + panic(http.ErrAbortHandler) + } +} + +func (tw *baseTimeoutWriter) closeNotify() <-chan bool { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + done := make(chan bool) + close(done) + return done + } + + return tw.w.(http.CloseNotifier).CloseNotify() +} + +func (tw *baseTimeoutWriter) hijack() (net.Conn, *bufio.ReadWriter, error) { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return nil, nil, http.ErrHandlerTimeout + } + conn, rw, err := tw.w.(http.Hijacker).Hijack() + if err == nil { + tw.hijacked = true + } + return conn, rw, err +} + +type closeTimeoutWriter struct { + *baseTimeoutWriter +} + +func (tw *closeTimeoutWriter) CloseNotify() <-chan bool { + return tw.closeNotify() +} + +type hijackTimeoutWriter struct { + *baseTimeoutWriter +} + +func (tw *hijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return tw.hijack() +} + +type closeHijackTimeoutWriter struct { + *baseTimeoutWriter +} + +func (tw *closeHijackTimeoutWriter) CloseNotify() <-chan bool { + return tw.closeNotify() +} + +func (tw *closeHijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return tw.hijack() +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go new file mode 100644 index 0000000000..857ce18830 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/client-go/kubernetes/scheme" +) + +// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown. +func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + // if this happens, the handler chain isn't setup correctly because there is no request info + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } + + if !longRunning(req, requestInfo) { + if err := wg.Add(1); err != nil { + // When apiserver is shutting down, signal clients to retry + // There is a good chance the client hit a different server, so a tight retry is good for client responsiveness. 
+ w.Header().Add("Retry-After", "1") + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + w.Header().Set("X-Content-Type-Options", "nosniff") + statusErr := apierrors.NewServiceUnavailable("apiserver is shutting down").Status() + w.WriteHeader(int(statusErr.Code)) + fmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr)) + return + } + defer wg.Done() + } + + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go new file mode 100644 index 0000000000..34c5398dba --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server/httplog" + "k8s.io/klog/v2" +) + +// WithPanicRecovery wraps an http Handler to recover and log panics (except in the special case of http.ErrAbortHandler panics, which suppress logging). +func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolver) http.Handler { + return withPanicRecovery(handler, func(w http.ResponseWriter, req *http.Request, err interface{}) { + if err == http.ErrAbortHandler { + // Honor the http.ErrAbortHandler sentinel panic value + // + // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes + // that the effect of the panic was isolated to the active request. + // It recovers the panic, logs a stack trace to the server error log, + // and either closes the network connection or sends an HTTP/2 + // RST_STREAM, depending on the HTTP protocol. To abort a handler so + // the client sees an interrupted response but the server doesn't log + // an error, panic with the value ErrAbortHandler. + // + // Note that the ReallyCrash variable controls the behaviour of the HandleCrash function + // So it might actually crash, after calling the handlers + if info, err := resolver.NewRequestInfo(req); err != nil { + metrics.RecordRequestAbort(req, nil) + } else { + metrics.RecordRequestAbort(req, info) + } + // This call can have different handlers, but the default chain rate limits. Call it after the metrics are updated + // in case the rate limit delays it. If you outrun the rate for this one timed out requests, something has gone + // seriously wrong with your server, but generally having a logging signal for timeouts is useful. + runtime.HandleError(fmt.Errorf("timeout or abort while handling: %v %q", req.Method, req.URL.Path)) + return + } + http.Error(w, "This request caused apiserver to panic. 
Look in the logs for details.", http.StatusInternalServerError) + klog.Errorf("apiserver panic'd on %v %v", req.Method, req.RequestURI) + }) +} + +func withPanicRecovery(handler http.Handler, crashHandler func(http.ResponseWriter, *http.Request, interface{})) http.Handler { + handler = httplog.WithLogging(handler, httplog.DefaultStacktracePred) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + defer runtime.HandleCrash(func(err interface{}) { + crashHandler(w, req, err) + }) + + // Dispatch to the internal handler + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go new file mode 100644 index 0000000000..d7d60b213d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -0,0 +1,632 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "net/http" + gpath "path" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + "github.com/go-openapi/spec" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + genericapi "k8s.io/apiserver/pkg/endpoints" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/server/routes" + "k8s.io/apiserver/pkg/storageversion" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utilopenapi "k8s.io/apiserver/pkg/util/openapi" + restclient "k8s.io/client-go/rest" + "k8s.io/klog/v2" + openapibuilder "k8s.io/kube-openapi/pkg/builder" + openapicommon "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/handler" + openapiutil "k8s.io/kube-openapi/pkg/util" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" +) + +// Info about an API group. +type APIGroupInfo struct { + PrioritizedVersions []schema.GroupVersion + // Info about the resources in this group. It's a map from version to resource to the storage. + VersionedResourcesStorageMap map[string]map[string]rest.Storage + // OptionsExternalVersion controls the APIVersion used for common objects in the + // schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may + // define a version "v1beta1" but want to use the Kubernetes "v1" internal objects. + // If nil, defaults to groupMeta.GroupVersion. + // TODO: Remove this when https://github.com/kubernetes/kubernetes/issues/19018 is fixed. 
+ OptionsExternalVersion *schema.GroupVersion + // MetaGroupVersion defaults to "meta.k8s.io/v1" and is the scheme group version used to decode + // common API implementations like ListOptions. Future changes will allow this to vary by group + // version (for when the inevitable meta/v2 group emerges). + MetaGroupVersion *schema.GroupVersion + + // Scheme includes all of the types used by this group and how to convert between them (or + // to convert objects from outside of this group that are accepted in this API). + // TODO: replace with interfaces + Scheme *runtime.Scheme + // NegotiatedSerializer controls how this group encodes and decodes data + NegotiatedSerializer runtime.NegotiatedSerializer + // ParameterCodec performs conversions for query parameters passed to API calls + ParameterCodec runtime.ParameterCodec + + // StaticOpenAPISpec is the spec derived from the definitions of all resources installed together. + // It is set during InstallAPIGroups, InstallAPIGroup, and InstallLegacyAPIGroup. + StaticOpenAPISpec *spec.Swagger +} + +// GenericAPIServer contains state for a Kubernetes cluster api server. +type GenericAPIServer struct { + // discoveryAddresses is used to build cluster IPs for discovery. + discoveryAddresses discovery.Addresses + + // LoopbackClientConfig is a config for a privileged loopback connection to the API server + LoopbackClientConfig *restclient.Config + + // minRequestTimeout is how short the request timeout can be. This is used to build the RESTHandler + minRequestTimeout time.Duration + + // ShutdownTimeout is the timeout used for server shutdown. This specifies the timeout before server + // gracefully shutdown returns. + ShutdownTimeout time.Duration + + // legacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests + // to InstallLegacyAPIGroup + legacyAPIGroupPrefixes sets.String + + // admissionControl is used to build the RESTStorage that backs an API Group. + admissionControl admission.Interface + + // SecureServingInfo holds configuration of the TLS server. + SecureServingInfo *SecureServingInfo + + // ExternalAddress is the address (hostname or IP and port) that should be used in + // external (public internet) URLs for this GenericAPIServer. + ExternalAddress string + + // Serializer controls how common API objects not in a group/version prefix are serialized for this server. + // Individual APIGroups may define their own serializers. + Serializer runtime.NegotiatedSerializer + + // "Outputs" + // Handler holds the handlers being used by this API server + Handler *APIServerHandler + + // listedPathProvider is a lister which provides the set of paths to show at / + listedPathProvider routes.ListedPathProvider + + // DiscoveryGroupManager serves /apis + DiscoveryGroupManager discovery.GroupManager + + // Enable swagger and/or OpenAPI if these configs are non-nil. + openAPIConfig *openapicommon.Config + + // OpenAPIVersionedService controls the /openapi/v2 endpoint, and can be used to update the served spec. + // It is set during PrepareRun. + OpenAPIVersionedService *handler.OpenAPIService + + // StaticOpenAPISpec is the spec derived from the restful container endpoints. + // It is set during PrepareRun. + StaticOpenAPISpec *spec.Swagger + + // PostStartHooks are each called after the server has started listening, in a separate go func for each + // with no guarantee of ordering between them. The map key is a name used for error reporting. 
+ // It may kill the process with a panic if it wishes to by returning an error. + postStartHookLock sync.Mutex + postStartHooks map[string]postStartHookEntry + postStartHooksCalled bool + disabledPostStartHooks sets.String + + preShutdownHookLock sync.Mutex + preShutdownHooks map[string]preShutdownHookEntry + preShutdownHooksCalled bool + + // healthz checks + healthzLock sync.Mutex + healthzChecks []healthz.HealthChecker + healthzChecksInstalled bool + // livez checks + livezLock sync.Mutex + livezChecks []healthz.HealthChecker + livezChecksInstalled bool + // readyz checks + readyzLock sync.Mutex + readyzChecks []healthz.HealthChecker + readyzChecksInstalled bool + livezGracePeriod time.Duration + livezClock clock.Clock + // the readiness stop channel is used to signal that the apiserver has initiated a shutdown sequence, this + // will cause readyz to return unhealthy. + readinessStopCh chan struct{} + + // auditing. The backend is started after the server starts listening. + AuditBackend audit.Backend + + // Authorizer determines whether a user is allowed to make a certain request. The Handler does a preliminary + // authorization check using the request URI but it may be necessary to make additional checks, such as in + // the create-on-update case + Authorizer authorizer.Authorizer + + // EquivalentResourceRegistry provides information about resources equivalent to a given resource, + // and the kind associated with a given resource. As resources are installed, they are registered here. + EquivalentResourceRegistry runtime.EquivalentResourceRegistry + + // delegationTarget is the next delegate in the chain. This is never nil. + delegationTarget DelegationTarget + + // HandlerChainWaitGroup allows you to wait for all chain handlers finish after the server shutdown. + HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup + + // ShutdownDelayDuration allows to block shutdown for some time, e.g. until endpoints pointing to this API server + // have converged on all node. During this time, the API server keeps serving, /healthz will return 200, + // but /readyz will return failure. + ShutdownDelayDuration time.Duration + + // The limit on the request body size that would be accepted and decoded in a write request. + // 0 means no limit. + maxRequestBodyBytes int64 + + // APIServerID is the ID of this API server + APIServerID string + + // StorageVersionManager holds the storage versions of the API resources installed by this server. + StorageVersionManager storageversion.Manager +} + +// DelegationTarget is an interface which allows for composition of API servers with top level handling that works +// as expected. +type DelegationTarget interface { + // UnprotectedHandler returns a handler that is NOT protected by a normal chain + UnprotectedHandler() http.Handler + + // PostStartHooks returns the post-start hooks that need to be combined + PostStartHooks() map[string]postStartHookEntry + + // PreShutdownHooks returns the pre-stop hooks that need to be combined + PreShutdownHooks() map[string]preShutdownHookEntry + + // HealthzChecks returns the healthz checks that need to be combined + HealthzChecks() []healthz.HealthChecker + + // ListedPaths returns the paths for supporting an index + ListedPaths() []string + + // NextDelegate returns the next delegationTarget in the chain of delegations + NextDelegate() DelegationTarget + + // PrepareRun does post API installation setup steps. It calls recursively the same function of the delegates. 
+ PrepareRun() preparedGenericAPIServer +} + +func (s *GenericAPIServer) UnprotectedHandler() http.Handler { + // when we delegate, we need the server we're delegating to choose whether or not to use gorestful + return s.Handler.Director +} +func (s *GenericAPIServer) PostStartHooks() map[string]postStartHookEntry { + return s.postStartHooks +} +func (s *GenericAPIServer) PreShutdownHooks() map[string]preShutdownHookEntry { + return s.preShutdownHooks +} +func (s *GenericAPIServer) HealthzChecks() []healthz.HealthChecker { + return s.healthzChecks +} +func (s *GenericAPIServer) ListedPaths() []string { + return s.listedPathProvider.ListedPaths() +} + +func (s *GenericAPIServer) NextDelegate() DelegationTarget { + return s.delegationTarget +} + +type emptyDelegate struct { +} + +func NewEmptyDelegate() DelegationTarget { + return emptyDelegate{} +} + +func (s emptyDelegate) UnprotectedHandler() http.Handler { + return nil +} +func (s emptyDelegate) PostStartHooks() map[string]postStartHookEntry { + return map[string]postStartHookEntry{} +} +func (s emptyDelegate) PreShutdownHooks() map[string]preShutdownHookEntry { + return map[string]preShutdownHookEntry{} +} +func (s emptyDelegate) HealthzChecks() []healthz.HealthChecker { + return []healthz.HealthChecker{} +} +func (s emptyDelegate) ListedPaths() []string { + return []string{} +} +func (s emptyDelegate) NextDelegate() DelegationTarget { + return nil +} +func (s emptyDelegate) PrepareRun() preparedGenericAPIServer { + return preparedGenericAPIServer{nil} +} + +// preparedGenericAPIServer is a private wrapper that enforces a call of PrepareRun() before Run can be invoked. +type preparedGenericAPIServer struct { + *GenericAPIServer +} + +// PrepareRun does post API installation setup steps. It calls recursively the same function of the delegates. +func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { + s.delegationTarget.PrepareRun() + + if s.openAPIConfig != nil { + s.OpenAPIVersionedService, s.StaticOpenAPISpec = routes.OpenAPI{ + Config: s.openAPIConfig, + }.Install(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) + } + + s.installHealthz() + s.installLivez() + err := s.addReadyzShutdownCheck(s.readinessStopCh) + if err != nil { + klog.Errorf("Failed to install readyz shutdown check %s", err) + } + s.installReadyz() + + // Register audit backend preShutdownHook. + if s.AuditBackend != nil { + err := s.AddPreShutdownHook("audit-backend", func() error { + s.AuditBackend.Shutdown() + return nil + }) + if err != nil { + klog.Errorf("Failed to add pre-shutdown hook for audit-backend %s", err) + } + } + + return preparedGenericAPIServer{s} +} + +// Run spawns the secure http server. It only returns if stopCh is closed +// or the secure port cannot be listened on initially. +func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { + delayedStopCh := make(chan struct{}) + + go func() { + defer close(delayedStopCh) + + <-stopCh + + // As soon as shutdown is initiated, /readyz should start returning failure. + // This gives the load balancer a window defined by ShutdownDelayDuration to detect that /readyz is red + // and stop sending traffic to this server. + close(s.readinessStopCh) + + time.Sleep(s.ShutdownDelayDuration) + }() + + // close socket after delayed stopCh + stoppedCh, err := s.NonBlockingRun(delayedStopCh) + if err != nil { + return err + } + + <-stopCh + + // run shutdown hooks directly. This includes deregistering from the kubernetes endpoint in case of kube-apiserver. 
+ err = s.RunPreShutdownHooks() + if err != nil { + return err + } + + // wait for the delayed stopCh before closing the handler chain (it rejects everything after Wait has been called). + <-delayedStopCh + // wait for stoppedCh that is closed when the graceful termination (server.Shutdown) is finished. + <-stoppedCh + + // Wait for all requests to finish, which are bounded by the RequestTimeout variable. + s.HandlerChainWaitGroup.Wait() + + return nil +} + +// NonBlockingRun spawns the secure http server. An error is +// returned if the secure port cannot be listened on. +// The returned channel is closed when the (asynchronous) termination is finished. +func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Use an stop channel to allow graceful shutdown without dropping audit events + // after http server shutdown. + auditStopCh := make(chan struct{}) + + // Start the audit backend before any request comes in. This means we must call Backend.Run + // before http server start serving. Otherwise the Backend.ProcessEvents call might block. + if s.AuditBackend != nil { + if err := s.AuditBackend.Run(auditStopCh); err != nil { + return nil, fmt.Errorf("failed to run the audit backend: %v", err) + } + } + + // Use an internal stop channel to allow cleanup of the listeners on error. + internalStopCh := make(chan struct{}) + var stoppedCh <-chan struct{} + if s.SecureServingInfo != nil && s.Handler != nil { + var err error + stoppedCh, err = s.SecureServingInfo.Serve(s.Handler, s.ShutdownTimeout, internalStopCh) + if err != nil { + close(internalStopCh) + close(auditStopCh) + return nil, err + } + } + + // Now that listener have bound successfully, it is the + // responsibility of the caller to close the provided channel to + // ensure cleanup. + go func() { + <-stopCh + close(internalStopCh) + if stoppedCh != nil { + <-stoppedCh + } + s.HandlerChainWaitGroup.Wait() + close(auditStopCh) + }() + + s.RunPostStartHooks(stopCh) + + if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil { + klog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) + } + + return stoppedCh, nil +} + +// installAPIResources is a private method for installing the REST storage backing each api groupversionresource +func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, openAPIModels openapiproto.Models) error { + var resourceInfos []*storageversion.ResourceInfo + for _, groupVersion := range apiGroupInfo.PrioritizedVersions { + if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { + klog.Warningf("Skipping API %v because it has no resources.", groupVersion) + continue + } + + apiGroupVersion := s.getAPIGroupVersion(apiGroupInfo, groupVersion, apiPrefix) + if apiGroupInfo.OptionsExternalVersion != nil { + apiGroupVersion.OptionsExternalVersion = apiGroupInfo.OptionsExternalVersion + } + apiGroupVersion.OpenAPIModels = openAPIModels + + if openAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + typeConverter, err := fieldmanager.NewTypeConverter(openAPIModels, false) + if err != nil { + return err + } + apiGroupVersion.TypeConverter = typeConverter + } + + apiGroupVersion.MaxRequestBodyBytes = s.maxRequestBodyBytes + + r, err := apiGroupVersion.InstallREST(s.Handler.GoRestfulContainer) + if err != nil { + return fmt.Errorf("unable to setup API %v: %v", apiGroupInfo, err) + } + resourceInfos = append(resourceInfos, r...) 
+ } + + if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionAPI) && + utilfeature.DefaultFeatureGate.Enabled(features.APIServerIdentity) { + // API installation happens before we start listening on the handlers, + // therefore it is safe to register ResourceInfos here. The handler will block + // write requests until the storage versions of the targeting resources are updated. + s.StorageVersionManager.AddResourceInfo(resourceInfos...) + } + + return nil +} + +func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo *APIGroupInfo) error { + if !s.legacyAPIGroupPrefixes.Has(apiPrefix) { + return fmt.Errorf("%q is not in the allowed legacy API prefixes: %v", apiPrefix, s.legacyAPIGroupPrefixes.List()) + } + + openAPIModels, err := s.getOpenAPIModels(apiPrefix, apiGroupInfo) + if err != nil { + return fmt.Errorf("unable to get openapi models: %v", err) + } + + if err := s.installAPIResources(apiPrefix, apiGroupInfo, openAPIModels); err != nil { + return err + } + + // Install the version handler. + // Add a handler at / to enumerate the supported api versions. + s.Handler.GoRestfulContainer.Add(discovery.NewLegacyRootAPIHandler(s.discoveryAddresses, s.Serializer, apiPrefix).WebService()) + + return nil +} + +// Exposes given api groups in the API. +func (s *GenericAPIServer) InstallAPIGroups(apiGroupInfos ...*APIGroupInfo) error { + for _, apiGroupInfo := range apiGroupInfos { + // Do not register empty group or empty version. Doing so claims /apis/ for the wrong entity to be returned. + // Catching these here places the error much closer to its origin + if len(apiGroupInfo.PrioritizedVersions[0].Group) == 0 { + return fmt.Errorf("cannot register handler with an empty group for %#v", *apiGroupInfo) + } + if len(apiGroupInfo.PrioritizedVersions[0].Version) == 0 { + return fmt.Errorf("cannot register handler with an empty version for %#v", *apiGroupInfo) + } + } + + openAPIModels, err := s.getOpenAPIModels(APIGroupPrefix, apiGroupInfos...) + if err != nil { + return fmt.Errorf("unable to get openapi models: %v", err) + } + + for _, apiGroupInfo := range apiGroupInfos { + if err := s.installAPIResources(APIGroupPrefix, apiGroupInfo, openAPIModels); err != nil { + return fmt.Errorf("unable to install api resources: %v", err) + } + + // setup discovery + // Install the version handler. + // Add a handler at /apis/ to enumerate all versions supported by this group. + apiVersionsForDiscovery := []metav1.GroupVersionForDiscovery{} + for _, groupVersion := range apiGroupInfo.PrioritizedVersions { + // Check the config to make sure that we elide versions that don't have any resources + if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { + continue + } + apiVersionsForDiscovery = append(apiVersionsForDiscovery, metav1.GroupVersionForDiscovery{ + GroupVersion: groupVersion.String(), + Version: groupVersion.Version, + }) + } + preferredVersionForDiscovery := metav1.GroupVersionForDiscovery{ + GroupVersion: apiGroupInfo.PrioritizedVersions[0].String(), + Version: apiGroupInfo.PrioritizedVersions[0].Version, + } + apiGroup := metav1.APIGroup{ + Name: apiGroupInfo.PrioritizedVersions[0].Group, + Versions: apiVersionsForDiscovery, + PreferredVersion: preferredVersionForDiscovery, + } + + s.DiscoveryGroupManager.AddGroup(apiGroup) + s.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(s.Serializer, apiGroup).WebService()) + } + return nil +} + +// Exposes the given api group in the API. 
+func (s *GenericAPIServer) InstallAPIGroup(apiGroupInfo *APIGroupInfo) error { + return s.InstallAPIGroups(apiGroupInfo) +} + +func (s *GenericAPIServer) getAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion, apiPrefix string) *genericapi.APIGroupVersion { + storage := make(map[string]rest.Storage) + for k, v := range apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version] { + storage[strings.ToLower(k)] = v + } + version := s.newAPIGroupVersion(apiGroupInfo, groupVersion) + version.Root = apiPrefix + version.Storage = storage + return version +} + +func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion) *genericapi.APIGroupVersion { + return &genericapi.APIGroupVersion{ + GroupVersion: groupVersion, + MetaGroupVersion: apiGroupInfo.MetaGroupVersion, + + ParameterCodec: apiGroupInfo.ParameterCodec, + Serializer: apiGroupInfo.NegotiatedSerializer, + Creater: apiGroupInfo.Scheme, + Convertor: apiGroupInfo.Scheme, + UnsafeConvertor: runtime.UnsafeObjectConvertor(apiGroupInfo.Scheme), + Defaulter: apiGroupInfo.Scheme, + Typer: apiGroupInfo.Scheme, + Linker: runtime.SelfLinker(meta.NewAccessor()), + + EquivalentResourceRegistry: s.EquivalentResourceRegistry, + + Admit: s.admissionControl, + MinRequestTimeout: s.minRequestTimeout, + Authorizer: s.Authorizer, + } +} + +// NewDefaultAPIGroupInfo returns an APIGroupInfo stubbed with "normal" values +// exposed for easier composition from other packages +func NewDefaultAPIGroupInfo(group string, scheme *runtime.Scheme, parameterCodec runtime.ParameterCodec, codecs serializer.CodecFactory) APIGroupInfo { + return APIGroupInfo{ + PrioritizedVersions: scheme.PrioritizedVersionsForGroup(group), + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{}, + // TODO unhardcode this. It was hardcoded before, but we need to re-evaluate + OptionsExternalVersion: &schema.GroupVersion{Version: "v1"}, + Scheme: scheme, + ParameterCodec: parameterCodec, + NegotiatedSerializer: codecs, + } +} + +// getOpenAPIModels is a private method for getting the OpenAPI models +func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (openapiproto.Models, error) { + if s.openAPIConfig == nil { + return nil, nil + } + pathsToIgnore := openapiutil.NewTrie(s.openAPIConfig.IgnorePrefixes) + resourceNames := make([]string, 0) + for _, apiGroupInfo := range apiGroupInfos { + groupResources, err := getResourceNamesForGroup(apiPrefix, apiGroupInfo, pathsToIgnore) + if err != nil { + return nil, err + } + resourceNames = append(resourceNames, groupResources...) + } + + // Build the openapi definitions for those resources and convert it to proto models + openAPISpec, err := openapibuilder.BuildOpenAPIDefinitionsForResources(s.openAPIConfig, resourceNames...) 
+ if err != nil { + return nil, err + } + for _, apiGroupInfo := range apiGroupInfos { + apiGroupInfo.StaticOpenAPISpec = openAPISpec + } + return utilopenapi.ToProtoModels(openAPISpec) +} + +// getResourceNamesForGroup is a private method for getting the canonical names for each resource to build in an api group +func getResourceNamesForGroup(apiPrefix string, apiGroupInfo *APIGroupInfo, pathsToIgnore openapiutil.Trie) ([]string, error) { + // Get the canonical names of every resource we need to build in this api group + resourceNames := make([]string, 0) + for _, groupVersion := range apiGroupInfo.PrioritizedVersions { + for resource, storage := range apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version] { + path := gpath.Join(apiPrefix, groupVersion.Group, groupVersion.Version, resource) + if !pathsToIgnore.HasPrefix(path) { + kind, err := genericapi.GetResourceKind(groupVersion, storage, apiGroupInfo.Scheme) + if err != nil { + return nil, err + } + sampleObject, err := apiGroupInfo.Scheme.New(kind) + if err != nil { + return nil, err + } + name := openapiutil.GetCanonicalTypeName(sampleObject) + resourceNames = append(resourceNames, name) + } + } + } + + return resourceNames, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/handler.go b/vendor/k8s.io/apiserver/pkg/server/handler.go new file mode 100644 index 0000000000..85d8af1ce3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/handler.go @@ -0,0 +1,190 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "bytes" + "fmt" + "net/http" + rt "runtime" + "sort" + "strings" + + "github.com/emicklei/go-restful" + "k8s.io/klog/v2" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/server/mux" +) + +// APIServerHandlers holds the different http.Handlers used by the API server. +// This includes the full handler chain, the director (which chooses between gorestful and nonGoRestful, +// the gorestful handler (used for the API) which falls through to the nonGoRestful handler on unregistered paths, +// and the nonGoRestful handler (which can contain a fallthrough of its own) +// FullHandlerChain -> Director -> {GoRestfulContainer,NonGoRestfulMux} based on inspection of registered web services +type APIServerHandler struct { + // FullHandlerChain is the one that is eventually served with. It should include the full filter + // chain and then call the Director. + FullHandlerChain http.Handler + // The registered APIs. InstallAPIs uses this. Other servers probably shouldn't access this directly. + GoRestfulContainer *restful.Container + // NonGoRestfulMux is the final HTTP handler in the chain. + // It comes after all filters and the API handling + // This is where other servers can attach handler to various parts of the chain. 
+ NonGoRestfulMux *mux.PathRecorderMux + + // Director is here so that we can properly handle fall through and proxy cases. + // This looks a bit bonkers, but here's what's happening. We need to have /apis handling registered in gorestful in order to have + // swagger generated for compatibility. Doing that with `/apis` as a webservice, means that it forcibly 404s (no defaulting allowed) + // all requests which are not /apis or /apis/. We need those calls to fall through behind goresful for proper delegation. Trying to + // register for a pattern which includes everything behind it doesn't work because gorestful negotiates for verbs and content encoding + // and all those things go crazy when gorestful really just needs to pass through. In addition, openapi enforces unique verb constraints + // which we don't fit into and it still muddies up swagger. Trying to switch the webservices into a route doesn't work because the + // containing webservice faces all the same problems listed above. + // This leads to the crazy thing done here. Our mux does what we need, so we'll place it in front of gorestful. It will introspect to + // decide if the route is likely to be handled by goresful and route there if needed. Otherwise, it goes to PostGoRestful mux in + // order to handle "normal" paths and delegation. Hopefully no API consumers will ever have to deal with this level of detail. I think + // we should consider completely removing gorestful. + // Other servers should only use this opaquely to delegate to an API server. + Director http.Handler +} + +// HandlerChainBuilderFn is used to wrap the GoRestfulContainer handler using the provided handler chain. +// It is normally used to apply filtering like authentication and authorization +type HandlerChainBuilderFn func(apiHandler http.Handler) http.Handler + +func NewAPIServerHandler(name string, s runtime.NegotiatedSerializer, handlerChainBuilder HandlerChainBuilderFn, notFoundHandler http.Handler) *APIServerHandler { + nonGoRestfulMux := mux.NewPathRecorderMux(name) + if notFoundHandler != nil { + nonGoRestfulMux.NotFoundHandler(notFoundHandler) + } + + gorestfulContainer := restful.NewContainer() + gorestfulContainer.ServeMux = http.NewServeMux() + gorestfulContainer.Router(restful.CurlyRouter{}) // e.g. for proxy/{kind}/{name}/{*} + gorestfulContainer.RecoverHandler(func(panicReason interface{}, httpWriter http.ResponseWriter) { + logStackOnRecover(s, panicReason, httpWriter) + }) + gorestfulContainer.ServiceErrorHandler(func(serviceErr restful.ServiceError, request *restful.Request, response *restful.Response) { + serviceErrorHandler(s, serviceErr, request, response) + }) + + director := director{ + name: name, + goRestfulContainer: gorestfulContainer, + nonGoRestfulMux: nonGoRestfulMux, + } + + return &APIServerHandler{ + FullHandlerChain: handlerChainBuilder(director), + GoRestfulContainer: gorestfulContainer, + NonGoRestfulMux: nonGoRestfulMux, + Director: director, + } +} + +// ListedPaths returns the paths that should be shown under / +func (a *APIServerHandler) ListedPaths() []string { + var handledPaths []string + // Extract the paths handled using restful.WebService + for _, ws := range a.GoRestfulContainer.RegisteredWebServices() { + handledPaths = append(handledPaths, ws.RootPath()) + } + handledPaths = append(handledPaths, a.NonGoRestfulMux.ListedPaths()...) 
+ sort.Strings(handledPaths) + + return handledPaths +} + +type director struct { + name string + goRestfulContainer *restful.Container + nonGoRestfulMux *mux.PathRecorderMux +} + +func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { + path := req.URL.Path + + // check to see if our webservices want to claim this path + for _, ws := range d.goRestfulContainer.RegisteredWebServices() { + switch { + case ws.RootPath() == "/apis": + // if we are exactly /apis or /apis/, then we need special handling in loop. + // normally these are passed to the nonGoRestfulMux, but if discovery is enabled, it will go directly. + // We can't rely on a prefix match since /apis matches everything (see the big comment on Director above) + if path == "/apis" || path == "/apis/" { + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + // don't use servemux here because gorestful servemuxes get messed up when removing webservices + // TODO fix gorestful, remove TPRs, or stop using gorestful + d.goRestfulContainer.Dispatch(w, req) + return + } + + case strings.HasPrefix(path, ws.RootPath()): + // ensure an exact match or a path boundary match + if len(path) == len(ws.RootPath()) || path[len(ws.RootPath())] == '/' { + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + // don't use servemux here because gorestful servemuxes get messed up when removing webservices + // TODO fix gorestful, remove TPRs, or stop using gorestful + d.goRestfulContainer.Dispatch(w, req) + return + } + } + } + + // if we didn't find a match, then we just skip gorestful altogether + klog.V(5).Infof("%v: %v %q satisfied by nonGoRestful", d.name, req.Method, path) + d.nonGoRestfulMux.ServeHTTP(w, req) +} + +//TODO: Unify with RecoverPanics? +func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, w http.ResponseWriter) { + var buffer bytes.Buffer + buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) + for i := 2; ; i++ { + _, file, line, ok := rt.Caller(i) + if !ok { + break + } + buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) + } + klog.Errorln(buffer.String()) + + headers := http.Header{} + if ct := w.Header().Get("Content-Type"); len(ct) > 0 { + headers.Set("Accept", ct) + } + responsewriters.ErrorNegotiated(apierrors.NewGenericServerResponse(http.StatusInternalServerError, "", schema.GroupResource{}, "", "", 0, false), s, schema.GroupVersion{}, w, &http.Request{Header: headers}) +} + +func serviceErrorHandler(s runtime.NegotiatedSerializer, serviceErr restful.ServiceError, request *restful.Request, resp *restful.Response) { + responsewriters.ErrorNegotiated( + apierrors.NewGenericServerResponse(serviceErr.Code, "", schema.GroupResource{}, "", serviceErr.Message, 0, false), + s, + schema.GroupVersion{}, + resp, + request.Request, + ) +} + +// ServeHTTP makes it an http.Handler +func (a *APIServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + a.FullHandlerChain.ServeHTTP(w, r) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz.go new file mode 100644 index 0000000000..645886949b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/healthz.go @@ -0,0 +1,159 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/server/healthz" +) + +// AddHealthChecks adds HealthCheck(s) to health endpoints (healthz, livez, readyz) but +// configures the liveness grace period to be zero, which means we expect this health check +// to immediately indicate that the apiserver is unhealthy. +func (s *GenericAPIServer) AddHealthChecks(checks ...healthz.HealthChecker) error { + // we opt for a delay of zero here, because this entrypoint adds generic health checks + // and not health checks which are specifically related to kube-apiserver boot-sequences. + return s.addHealthChecks(0, checks...) +} + +// AddBootSequenceHealthChecks adds health checks to the old healthz endpoint (for backwards compatibility reasons) +// as well as livez and readyz. The livez grace period is defined by the value of the +// command-line flag --livez-grace-period; before the grace period elapses, the livez health checks +// will default to healthy. One may want to set a grace period in order to prevent the kubelet from restarting +// the kube-apiserver due to long-ish boot sequences. Readyz health checks, on the other hand, have no grace period, +// since readyz should fail until boot fully completes. +func (s *GenericAPIServer) AddBootSequenceHealthChecks(checks ...healthz.HealthChecker) error { + return s.addHealthChecks(s.livezGracePeriod, checks...) +} + +// addHealthChecks adds health checks to healthz, livez, and readyz. The delay passed in will set +// a corresponding grace period on livez. +func (s *GenericAPIServer) addHealthChecks(livezGracePeriod time.Duration, checks ...healthz.HealthChecker) error { + s.healthzLock.Lock() + defer s.healthzLock.Unlock() + if s.healthzChecksInstalled { + return fmt.Errorf("unable to add because the healthz endpoint has already been created") + } + s.healthzChecks = append(s.healthzChecks, checks...) + return s.addLivezChecks(livezGracePeriod, checks...) +} + +// addReadyzChecks allows you to add a HealthCheck to readyz. +func (s *GenericAPIServer) addReadyzChecks(checks ...healthz.HealthChecker) error { + s.readyzLock.Lock() + defer s.readyzLock.Unlock() + if s.readyzChecksInstalled { + return fmt.Errorf("unable to add because the readyz endpoint has already been created") + } + s.readyzChecks = append(s.readyzChecks, checks...) + return nil +} + +// addLivezChecks allows you to add a HealthCheck to livez. It will also automatically add a check to readyz, +// since we want to avoid being ready when we are not live. +func (s *GenericAPIServer) addLivezChecks(delay time.Duration, checks ...healthz.HealthChecker) error { + s.livezLock.Lock() + defer s.livezLock.Unlock() + if s.livezChecksInstalled { + return fmt.Errorf("unable to add because the livez endpoint has already been created") + } + for _, check := range checks { + s.livezChecks = append(s.livezChecks, delayedHealthCheck(check, s.livezClock, delay)) + } + return s.addReadyzChecks(checks...) 
+} + +// addReadyzShutdownCheck is a convenience function for adding a readyz shutdown check, so +// that we can register that the api-server is no longer ready while we attempt to gracefully +// shutdown. +func (s *GenericAPIServer) addReadyzShutdownCheck(stopCh <-chan struct{}) error { + return s.addReadyzChecks(shutdownCheck{stopCh}) +} + +// installHealthz creates the healthz endpoint for this server +func (s *GenericAPIServer) installHealthz() { + s.healthzLock.Lock() + defer s.healthzLock.Unlock() + s.healthzChecksInstalled = true + healthz.InstallHandler(s.Handler.NonGoRestfulMux, s.healthzChecks...) +} + +// installReadyz creates the readyz endpoint for this server. +func (s *GenericAPIServer) installReadyz() { + s.readyzLock.Lock() + defer s.readyzLock.Unlock() + s.readyzChecksInstalled = true + healthz.InstallReadyzHandler(s.Handler.NonGoRestfulMux, s.readyzChecks...) +} + +// installLivez creates the livez endpoint for this server. +func (s *GenericAPIServer) installLivez() { + s.livezLock.Lock() + defer s.livezLock.Unlock() + s.livezChecksInstalled = true + healthz.InstallLivezHandler(s.Handler.NonGoRestfulMux, s.livezChecks...) +} + +// shutdownCheck fails if the embedded channel is closed. This is intended to allow for graceful shutdown sequences +// for the apiserver. +type shutdownCheck struct { + StopCh <-chan struct{} +} + +func (shutdownCheck) Name() string { + return "shutdown" +} + +func (c shutdownCheck) Check(req *http.Request) error { + select { + case <-c.StopCh: + return fmt.Errorf("process is shutting down") + default: + } + return nil +} + +// delayedHealthCheck wraps a health check which will not fail until the explicitly defined delay has elapsed. This +// is intended for use primarily for livez health checks. +func delayedHealthCheck(check healthz.HealthChecker, clock clock.Clock, delay time.Duration) healthz.HealthChecker { + return delayedLivezCheck{ + check, + clock.Now().Add(delay), + clock, + } +} + +type delayedLivezCheck struct { + check healthz.HealthChecker + startCheck time.Time + clock clock.Clock +} + +func (c delayedLivezCheck) Name() string { + return c.check.Name() +} + +func (c delayedLivezCheck) Check(req *http.Request) error { + if c.clock.Now().After(c.startCheck) { + return c.check.Check(req) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/doc.go b/vendor/k8s.io/apiserver/pkg/server/healthz/doc.go new file mode 100644 index 0000000000..d938caa371 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz implements basic http server health checking. 
+// Usage: +// import "k8s.io/apiserver/pkg/server/healthz" +// healthz.InstallHandler(mux) +package healthz // import "k8s.io/apiserver/pkg/server/healthz" diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go new file mode 100644 index 0000000000..e80b8501ed --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -0,0 +1,291 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz + +import ( + "bytes" + "fmt" + "net/http" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/server/httplog" + "k8s.io/klog/v2" +) + +// HealthChecker is a named healthz checker. +type HealthChecker interface { + Name() string + Check(req *http.Request) error +} + +// PingHealthz returns true automatically when checked +var PingHealthz HealthChecker = ping{} + +// ping implements the simplest possible healthz checker. +type ping struct{} + +func (ping) Name() string { + return "ping" +} + +// PingHealthz is a health check that returns true. +func (ping) Check(_ *http.Request) error { + return nil +} + +// LogHealthz returns true if logging is not blocked +var LogHealthz HealthChecker = &log{} + +type log struct { + startOnce sync.Once + lastVerified atomic.Value +} + +func (l *log) Name() string { + return "log" +} + +func (l *log) Check(_ *http.Request) error { + l.startOnce.Do(func() { + l.lastVerified.Store(time.Now()) + go wait.Forever(func() { + klog.Flush() + l.lastVerified.Store(time.Now()) + }, time.Minute) + }) + + lastVerified := l.lastVerified.Load().(time.Time) + if time.Since(lastVerified) < (2 * time.Minute) { + return nil + } + return fmt.Errorf("logging blocked") +} + +type cacheSyncWaiter interface { + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool +} + +type informerSync struct { + cacheSyncWaiter cacheSyncWaiter +} + +var _ HealthChecker = &informerSync{} + +// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given cacheSyncWaiter sync. +func NewInformerSyncHealthz(cacheSyncWaiter cacheSyncWaiter) HealthChecker { + return &informerSync{ + cacheSyncWaiter: cacheSyncWaiter, + } +} + +func (i *informerSync) Name() string { + return "informer-sync" +} + +func (i *informerSync) Check(_ *http.Request) error { + stopCh := make(chan struct{}) + // Close stopCh to force checking if informers are synced now. 
+ close(stopCh) + + informersByStarted := make(map[bool][]string) + for informerType, started := range i.cacheSyncWaiter.WaitForCacheSync(stopCh) { + informersByStarted[started] = append(informersByStarted[started], informerType.String()) + } + + if notStarted := informersByStarted[false]; len(notStarted) > 0 { + return fmt.Errorf("%d informers not started yet: %v", len(notStarted), notStarted) + } + return nil +} + +// NamedCheck returns a healthz checker for the given name and function. +func NamedCheck(name string, check func(r *http.Request) error) HealthChecker { + return &healthzCheck{name, check} +} + +// InstallHandler registers handlers for health checking on the path +// "/healthz" to mux. *All handlers* for mux must be specified in +// exactly one call to InstallHandler. Calling InstallHandler more +// than once for the same mux will result in a panic. +func InstallHandler(mux mux, checks ...HealthChecker) { + InstallPathHandler(mux, "/healthz", checks...) +} + +// InstallReadyzHandler registers handlers for health checking on the path +// "/readyz" to mux. *All handlers* for mux must be specified in +// exactly one call to InstallHandler. Calling InstallHandler more +// than once for the same mux will result in a panic. +func InstallReadyzHandler(mux mux, checks ...HealthChecker) { + InstallPathHandler(mux, "/readyz", checks...) +} + +// InstallLivezHandler registers handlers for liveness checking on the path +// "/livez" to mux. *All handlers* for mux must be specified in +// exactly one call to InstallHandler. Calling InstallHandler more +// than once for the same mux will result in a panic. +func InstallLivezHandler(mux mux, checks ...HealthChecker) { + InstallPathHandler(mux, "/livez", checks...) +} + +// InstallPathHandler registers handlers for health checking on +// a specific path to mux. *All handlers* for the path must be +// specified in exactly one call to InstallPathHandler. Calling +// InstallPathHandler more than once for the same path and mux will +// result in a panic. +func InstallPathHandler(mux mux, path string, checks ...HealthChecker) { + if len(checks) == 0 { + klog.V(5).Info("No default health checks specified. Installing the ping handler.") + checks = []HealthChecker{PingHealthz} + } + + klog.V(5).Infof("Installing health checkers for (%v): %v", path, formatQuoted(checkerNames(checks...)...)) + + name := strings.Split(strings.TrimPrefix(path, "/"), "/")[0] + mux.Handle(path, + metrics.InstrumentHandlerFunc("GET", + /* group = */ "", + /* version = */ "", + /* resource = */ "", + /* subresource = */ path, + /* scope = */ "", + /* component = */ "", + /* deprecated */ false, + /* removedRelease */ "", + handleRootHealth(name, checks...))) + for _, check := range checks { + mux.Handle(fmt.Sprintf("%s/%v", path, check.Name()), adaptCheckToHandler(check.Check)) + } +} + +// mux is an interface describing the methods InstallHandler requires. +type mux interface { + Handle(pattern string, handler http.Handler) +} + +// healthzCheck implements HealthChecker on an arbitrary name and check function. 
+type healthzCheck struct { + name string + check func(r *http.Request) error +} + +var _ HealthChecker = &healthzCheck{} + +func (c *healthzCheck) Name() string { + return c.name +} + +func (c *healthzCheck) Check(r *http.Request) error { + return c.check(r) +} + +// getExcludedChecks extracts the health check names to be excluded from the query param +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + +// handleRootHealth returns an http.HandlerFunc that serves the provided checks. +func handleRootHealth(name string, checks ...HealthChecker) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + excluded := getExcludedChecks(r) + // failedVerboseLogOutput is for output to the log. It indicates detailed failed output information for the log. + var failedVerboseLogOutput bytes.Buffer + var failedChecks []string + var individualCheckOutput bytes.Buffer + for _, check := range checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(check.Name()) { + excluded.Delete(check.Name()) + fmt.Fprintf(&individualCheckOutput, "[+]%s excluded: ok\n", check.Name()) + continue + } + if err := check.Check(r); err != nil { + // don't include the error since this endpoint is public. If someone wants more detail + // they should have explicit permission to the detailed checks. + fmt.Fprintf(&individualCheckOutput, "[-]%s failed: reason withheld\n", check.Name()) + // but we do want detailed information for our log + fmt.Fprintf(&failedVerboseLogOutput, "[-]%s failed: %v\n", check.Name(), err) + failedChecks = append(failedChecks, check.Name()) + } else { + fmt.Fprintf(&individualCheckOutput, "[+]%s ok\n", check.Name()) + } + } + if excluded.Len() > 0 { + fmt.Fprintf(&individualCheckOutput, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(excluded.List()...)) + klog.Warningf("cannot exclude some health checks, no health checks are installed matching %s", + formatQuoted(excluded.List()...)) + } + // always be verbose on failure + if len(failedChecks) > 0 { + klog.V(2).Infof("%s check failed: %s\n%v", strings.Join(failedChecks, ","), name, failedVerboseLogOutput.String()) + http.Error(httplog.Unlogged(r, w), fmt.Sprintf("%s%s check failed", individualCheckOutput.String(), name), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Header().Set("X-Content-Type-Options", "nosniff") + if _, found := r.URL.Query()["verbose"]; !found { + fmt.Fprint(w, "ok") + return + } + + individualCheckOutput.WriteTo(w) + fmt.Fprintf(w, "%s check passed\n", name) + }) +} + +// adaptCheckToHandler returns an http.HandlerFunc that serves the provided checks. +func adaptCheckToHandler(c func(r *http.Request) error) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + err := c(r) + if err != nil { + http.Error(w, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) + } else { + fmt.Fprint(w, "ok") + } + }) +} + +// checkerNames returns the names of the checks in the same order as passed in. +func checkerNames(checks ...HealthChecker) []string { + // accumulate the names of checks for printing them out. 
+	checkerNames := make([]string, 0, len(checks))
+	for _, check := range checks {
+		checkerNames = append(checkerNames, check.Name())
+	}
+	return checkerNames
+}
+
+// formatQuoted returns a formatted string of the health check names,
+// preserving the order passed in.
+func formatQuoted(names ...string) string {
+	quoted := make([]string, 0, len(names))
+	for _, name := range names {
+		quoted = append(quoted, fmt.Sprintf("%q", name))
+	}
+	return strings.Join(quoted, ",")
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/hooks.go b/vendor/k8s.io/apiserver/pkg/server/hooks.go
new file mode 100644
index 0000000000..999ad36000
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/hooks.go
@@ -0,0 +1,244 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package server
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"runtime/debug"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/server/healthz"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+)
+
+// PostStartHookFunc is a function that is called after the server has started.
+// It must properly handle cases like:
+// 1. asynchronous start in multiple API server processes
+// 2. conflicts between the different processes all trying to perform the same action
+// 3. partially complete work (API server crashes while running your hook)
+// 4. API server access **BEFORE** your hook has completed
+// Think of it like a mini-controller that is super privileged and gets to run in-process.
+// If you use this feature, tag @deads2k on github, who has promised to review code for anyone's PostStartHook
+// until it becomes easier to use.
+type PostStartHookFunc func(context PostStartHookContext) error
+
+// PreShutdownHookFunc is a function that can be added to the shutdown logic.
+type PreShutdownHookFunc func() error
+
+// PostStartHookContext provides information about this API server to a PostStartHookFunc
+type PostStartHookContext struct {
+	// LoopbackClientConfig is a config for a privileged loopback connection to the API server
+	LoopbackClientConfig *restclient.Config
+	// StopCh is the channel that will be closed when the server stops
+	StopCh <-chan struct{}
+}
+
+// PostStartHookProvider is an interface for anything that, in addition to its other duties,
+// can provide a post start hook for the api server.
+type PostStartHookProvider interface {
+	PostStartHook() (string, PostStartHookFunc, error)
+}
+
+type postStartHookEntry struct {
+	hook PostStartHookFunc
+	// originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message
+	// for duplicate registration.
+	originatingStack string
+
+	// done will be closed when the postHook is finished
+	done chan struct{}
+}
+
+type PostStartHookConfigEntry struct {
+	hook PostStartHookFunc
+	// originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message
+	// for duplicate registration.
+ originatingStack string +} + +type preShutdownHookEntry struct { + hook PreShutdownHookFunc +} + +// AddPostStartHook allows you to add a PostStartHook. +func (s *GenericAPIServer) AddPostStartHook(name string, hook PostStartHookFunc) error { + if len(name) == 0 { + return fmt.Errorf("missing name") + } + if hook == nil { + return fmt.Errorf("hook func may not be nil: %q", name) + } + if s.disabledPostStartHooks.Has(name) { + klog.V(1).Infof("skipping %q because it was explicitly disabled", name) + return nil + } + + s.postStartHookLock.Lock() + defer s.postStartHookLock.Unlock() + + if s.postStartHooksCalled { + return fmt.Errorf("unable to add %q because PostStartHooks have already been called", name) + } + if postStartHook, exists := s.postStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + return fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.originatingStack) + } + + // done is closed when the poststarthook is finished. This is used by the health check to be able to indicate + // that the poststarthook is finished + done := make(chan struct{}) + if err := s.AddBootSequenceHealthChecks(postStartHookHealthz{name: "poststarthook/" + name, done: done}); err != nil { + return err + } + s.postStartHooks[name] = postStartHookEntry{hook: hook, originatingStack: string(debug.Stack()), done: done} + + return nil +} + +// AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure +func (s *GenericAPIServer) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { + if err := s.AddPostStartHook(name, hook); err != nil { + klog.Fatalf("Error registering PostStartHook %q: %v", name, err) + } +} + +// AddPreShutdownHook allows you to add a PreShutdownHook. 
+func (s *GenericAPIServer) AddPreShutdownHook(name string, hook PreShutdownHookFunc) error {
+	if len(name) == 0 {
+		return fmt.Errorf("missing name")
+	}
+	if hook == nil {
+		return nil
+	}
+
+	s.preShutdownHookLock.Lock()
+	defer s.preShutdownHookLock.Unlock()
+
+	if s.preShutdownHooksCalled {
+		return fmt.Errorf("unable to add %q because PreShutdownHooks have already been called", name)
+	}
+	if _, exists := s.preShutdownHooks[name]; exists {
+		return fmt.Errorf("unable to add %q because it is already registered", name)
+	}
+
+	s.preShutdownHooks[name] = preShutdownHookEntry{hook: hook}
+
+	return nil
+}
+
+// AddPreShutdownHookOrDie allows you to add a PreShutdownHook, but dies on failure
+func (s *GenericAPIServer) AddPreShutdownHookOrDie(name string, hook PreShutdownHookFunc) {
+	if err := s.AddPreShutdownHook(name, hook); err != nil {
+		klog.Fatalf("Error registering PreShutdownHook %q: %v", name, err)
+	}
+}
+
+// RunPostStartHooks runs the PostStartHooks for the server
+func (s *GenericAPIServer) RunPostStartHooks(stopCh <-chan struct{}) {
+	s.postStartHookLock.Lock()
+	defer s.postStartHookLock.Unlock()
+	s.postStartHooksCalled = true
+
+	context := PostStartHookContext{
+		LoopbackClientConfig: s.LoopbackClientConfig,
+		StopCh: stopCh,
+	}
+
+	for hookName, hookEntry := range s.postStartHooks {
+		go runPostStartHook(hookName, hookEntry, context)
+	}
+}
+
+// RunPreShutdownHooks runs the PreShutdownHooks for the server
+func (s *GenericAPIServer) RunPreShutdownHooks() error {
+	var errorList []error
+
+	s.preShutdownHookLock.Lock()
+	defer s.preShutdownHookLock.Unlock()
+	s.preShutdownHooksCalled = true
+
+	for hookName, hookEntry := range s.preShutdownHooks {
+		if err := runPreShutdownHook(hookName, hookEntry); err != nil {
+			errorList = append(errorList, err)
+		}
+	}
+	return utilerrors.NewAggregate(errorList)
+}
+
+// isPostStartHookRegistered checks whether a given PostStartHook is registered
+func (s *GenericAPIServer) isPostStartHookRegistered(name string) bool {
+	s.postStartHookLock.Lock()
+	defer s.postStartHookLock.Unlock()
+	_, exists := s.postStartHooks[name]
+	return exists
+}
+
+func runPostStartHook(name string, entry postStartHookEntry, context PostStartHookContext) {
+	var err error
+	func() {
+		// don't let the hook *accidentally* panic and kill the server
+		defer utilruntime.HandleCrash()
+		err = entry.hook(context)
+	}()
+	// if the hook intentionally wants to kill the server, let it.
+	if err != nil {
+		klog.Fatalf("PostStartHook %q failed: %v", name, err)
+	}
+	close(entry.done)
+}
+
+func runPreShutdownHook(name string, entry preShutdownHookEntry) error {
+	var err error
+	func() {
+		// don't let the hook *accidentally* panic and kill the server
+		defer utilruntime.HandleCrash()
+		err = entry.hook()
+	}()
+	if err != nil {
+		return fmt.Errorf("PreShutdownHook %q failed: %v", name, err)
+	}
+	return nil
+}
+
+// postStartHookHealthz implements a healthz check for poststarthooks. It will return a "hookNotFinished"
+// error until the poststarthook is finished.
+type postStartHookHealthz struct { + name string + + // done will be closed when the postStartHook is finished + done chan struct{} +} + +var _ healthz.HealthChecker = postStartHookHealthz{} + +func (h postStartHookHealthz) Name() string { + return h.name +} + +var hookNotFinished = errors.New("not finished") + +func (h postStartHookHealthz) Check(req *http.Request) error { + select { + case <-h.done: + return nil + default: + return hookNotFinished + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/doc.go b/vendor/k8s.io/apiserver/pkg/server/httplog/doc.go new file mode 100644 index 0000000000..caa6572c76 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package httplog contains a helper object and functions to maintain a log +// along with an http response. +package httplog // import "k8s.io/apiserver/pkg/server/httplog" diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go new file mode 100644 index 0000000000..4cb5306672 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -0,0 +1,243 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httplog + +import ( + "bufio" + "context" + "fmt" + "net" + "net/http" + "runtime" + "time" + + "k8s.io/klog/v2" +) + +// StacktracePred returns true if a stacktrace should be logged for this status. +type StacktracePred func(httpStatus int) (logStacktrace bool) + +type logger interface { + Addf(format string, data ...interface{}) +} + +type respLoggerContextKeyType int + +// respLoggerContextKey is used to store the respLogger pointer in the request context. +const respLoggerContextKey respLoggerContextKeyType = iota + +// Add a layer on top of ResponseWriter, so we can track latency and error +// message sources. +// +// TODO now that we're using go-restful, we shouldn't need to be wrapping +// the http.ResponseWriter. We can recover panics from go-restful, and +// the logging value is questionable. +type respLogger struct { + hijacked bool + statusRecorded bool + status int + statusStack string + addedInfo string + startTime time.Time + + captureErrorOutput bool + + req *http.Request + w http.ResponseWriter + + logStacktracePred StacktracePred +} + +// Simple logger that logs immediately when Addf is called +type passthroughLogger struct{} + +// Addf logs info immediately. 
+func (passthroughLogger) Addf(format string, data ...interface{}) { + klog.V(2).Info(fmt.Sprintf(format, data...)) +} + +// DefaultStacktracePred is the default implementation of StacktracePred. +func DefaultStacktracePred(status int) bool { + return (status < http.StatusOK || status >= http.StatusInternalServerError) && status != http.StatusSwitchingProtocols +} + +// WithLogging wraps the handler with logging. +func WithLogging(handler http.Handler, pred StacktracePred) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + if old := respLoggerFromContext(req); old != nil { + panic("multiple WithLogging calls!") + } + rl := newLogged(req, w).StacktraceWhen(pred) + req = req.WithContext(context.WithValue(ctx, respLoggerContextKey, rl)) + + if klog.V(3).Enabled() { + defer func() { klog.InfoS("HTTP", rl.LogArgs()...) }() + } + handler.ServeHTTP(rl, req) + }) +} + +// respLoggerFromContext returns the respLogger or nil. +func respLoggerFromContext(req *http.Request) *respLogger { + ctx := req.Context() + val := ctx.Value(respLoggerContextKey) + if rl, ok := val.(*respLogger); ok { + return rl + } + return nil +} + +// newLogged turns a normal response writer into a logged response writer. +func newLogged(req *http.Request, w http.ResponseWriter) *respLogger { + return &respLogger{ + startTime: time.Now(), + req: req, + w: w, + logStacktracePred: DefaultStacktracePred, + } +} + +// LogOf returns the logger hiding in w. If there is not an existing logger +// then a passthroughLogger will be created which will log to stdout immediately +// when Addf is called. +func LogOf(req *http.Request, w http.ResponseWriter) logger { + if rl := respLoggerFromContext(req); rl != nil { + return rl + } + return &passthroughLogger{} +} + +// Unlogged returns the original ResponseWriter, or w if it is not our inserted logger. +func Unlogged(req *http.Request, w http.ResponseWriter) http.ResponseWriter { + if rl := respLoggerFromContext(req); rl != nil { + return rl.w + } + return w +} + +// StacktraceWhen sets the stacktrace logging predicate, which decides when to log a stacktrace. +// There's a default, so you don't need to call this unless you don't like the default. +func (rl *respLogger) StacktraceWhen(pred StacktracePred) *respLogger { + rl.logStacktracePred = pred + return rl +} + +// StatusIsNot returns a StacktracePred which will cause stacktraces to be logged +// for any status *not* in the given list. +func StatusIsNot(statuses ...int) StacktracePred { + statusesNoTrace := map[int]bool{} + for _, s := range statuses { + statusesNoTrace[s] = true + } + return func(status int) bool { + _, ok := statusesNoTrace[status] + return !ok + } +} + +// Addf adds additional data to be logged with this request. +func (rl *respLogger) Addf(format string, data ...interface{}) { + rl.addedInfo += "\n" + fmt.Sprintf(format, data...) 
+} + +func (rl *respLogger) LogArgs() []interface{} { + latency := time.Since(rl.startTime) + if rl.hijacked { + return []interface{}{ + "verb", rl.req.Method, + "URI", rl.req.RequestURI, + "latency", latency, + "userAgent", rl.req.UserAgent(), + "srcIP", rl.req.RemoteAddr, + "hijacked", true, + } + } + args := []interface{}{ + "verb", rl.req.Method, + "URI", rl.req.RequestURI, + "latency", latency, + "userAgent", rl.req.UserAgent(), + "srcIP", rl.req.RemoteAddr, + "resp", rl.status, + } + if len(rl.statusStack) > 0 { + args = append(args, "statusStack", rl.statusStack) + } + + if len(rl.addedInfo) > 0 { + args = append(args, "addedInfo", rl.addedInfo) + } + return args +} + +// Header implements http.ResponseWriter. +func (rl *respLogger) Header() http.Header { + return rl.w.Header() +} + +// Write implements http.ResponseWriter. +func (rl *respLogger) Write(b []byte) (int, error) { + if !rl.statusRecorded { + rl.recordStatus(http.StatusOK) // Default if WriteHeader hasn't been called + } + if rl.captureErrorOutput { + rl.Addf("logging error output: %q\n", string(b)) + } + return rl.w.Write(b) +} + +// Flush implements http.Flusher even if the underlying http.Writer doesn't implement it. +// Flush is used for streaming purposes and allows to flush buffered data to the client. +func (rl *respLogger) Flush() { + if flusher, ok := rl.w.(http.Flusher); ok { + flusher.Flush() + } else if klog.V(2).Enabled() { + klog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) + } +} + +// WriteHeader implements http.ResponseWriter. +func (rl *respLogger) WriteHeader(status int) { + rl.recordStatus(status) + rl.w.WriteHeader(status) +} + +// Hijack implements http.Hijacker. +func (rl *respLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { + rl.hijacked = true + return rl.w.(http.Hijacker).Hijack() +} + +// CloseNotify implements http.CloseNotifier +func (rl *respLogger) CloseNotify() <-chan bool { + return rl.w.(http.CloseNotifier).CloseNotify() +} + +func (rl *respLogger) recordStatus(status int) { + rl.status = status + rl.statusRecorded = true + if rl.logStacktracePred(status) { + // Only log stacks for errors + stack := make([]byte, 50*1024) + stack = stack[:runtime.Stack(stack, false)] + rl.statusStack = "\n" + string(stack) + rl.captureErrorOutput = true + } else { + rl.statusStack = "" + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/OWNERS b/vendor/k8s.io/apiserver/pkg/server/mux/OWNERS new file mode 100644 index 0000000000..4da107c8c3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/mux/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sttts diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/doc.go b/vendor/k8s.io/apiserver/pkg/server/mux/doc.go new file mode 100644 index 0000000000..178aa9fe6c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/mux/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package mux contains abstractions for http multiplexing of APIs.
+package mux // import "k8s.io/apiserver/pkg/server/mux"
diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go
new file mode 100644
index 0000000000..cb4941f026
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go
@@ -0,0 +1,278 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mux
+
+import (
+	"fmt"
+	"net/http"
+	"runtime/debug"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"k8s.io/klog/v2"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// PathRecorderMux wraps a mux object and records the registered exposedPaths.
+type PathRecorderMux struct {
+	// name is used for logging so you can trace requests through
+	name string
+
+	lock sync.Mutex
+	notFoundHandler http.Handler
+	pathToHandler map[string]http.Handler
+	prefixToHandler map[string]http.Handler
+
+	// mux stores a pathHandler and is used to handle the actual serving.
+	// Turns out, we want to accept trailing slashes, BUT we don't care about handling
+	// everything under them. It does exact matches only, unless it's explicitly requested to
+	// do something different.
+	mux atomic.Value
+
+	// exposedPaths is the list of paths that should be shown at /
+	exposedPaths []string
+
+	// pathStacks holds the stacks of all registered paths. This allows us to show a more helpful message
+	// before the "http: multiple registrations for %s" panic.
+	pathStacks map[string]string
+}
+
+// pathHandler is an http.Handler that will satisfy requests first by exact match, then by prefix,
+// then by notFoundHandler
+type pathHandler struct {
+	// muxName is used for logging so you can trace requests through
+	muxName string
+
+	// pathToHandler is a map from an exactly matching request path to its handler
+	pathToHandler map[string]http.Handler
+
+	// this has to be sorted by most slashes then by length
+	prefixHandlers []prefixHandler
+
+	// notFoundHandler is the handler to use for satisfying requests with no other match
+	notFoundHandler http.Handler
+}
+
+// prefixHandler holds the prefix it should match and the handler to use
+type prefixHandler struct {
+	// prefix is the prefix to test for a request match
+	prefix string
+	// handler is used to satisfy matching requests
+	handler http.Handler
+}
+
+// NewPathRecorderMux creates a new PathRecorderMux
+func NewPathRecorderMux(name string) *PathRecorderMux {
+	ret := &PathRecorderMux{
+		name: name,
+		pathToHandler: map[string]http.Handler{},
+		prefixToHandler: map[string]http.Handler{},
+		mux: atomic.Value{},
+		exposedPaths: []string{},
+		pathStacks: map[string]string{},
+	}
+
+	ret.mux.Store(&pathHandler{notFoundHandler: http.NotFoundHandler()})
+	return ret
+}
+
+// ListedPaths returns the registered handler exposedPaths.
+func (m *PathRecorderMux) ListedPaths() []string {
+	handledPaths := append([]string{}, m.exposedPaths...)
+	sort.Strings(handledPaths)
+
+	return handledPaths
+}
+
+func (m *PathRecorderMux) trackCallers(path string) {
+	if existingStack, ok := m.pathStacks[path]; ok {
+		utilruntime.HandleError(fmt.Errorf("registered %q from %v", path, existingStack))
+	}
+	m.pathStacks[path] = string(debug.Stack())
+}
+
+// refreshMuxLocked creates a new mux and must be called while locked. Otherwise the view of handlers may
+// not be consistent.
+func (m *PathRecorderMux) refreshMuxLocked() {
+	newMux := &pathHandler{
+		muxName: m.name,
+		pathToHandler: map[string]http.Handler{},
+		prefixHandlers: []prefixHandler{},
+		notFoundHandler: http.NotFoundHandler(),
+	}
+	if m.notFoundHandler != nil {
+		newMux.notFoundHandler = m.notFoundHandler
+	}
+	for path, handler := range m.pathToHandler {
+		newMux.pathToHandler[path] = handler
+	}
+
+	keys := sets.StringKeySet(m.prefixToHandler).List()
+	sort.Sort(sort.Reverse(byPrefixPriority(keys)))
+	for _, prefix := range keys {
+		newMux.prefixHandlers = append(newMux.prefixHandlers, prefixHandler{
+			prefix: prefix,
+			handler: m.prefixToHandler[prefix],
+		})
+	}
+
+	m.mux.Store(newMux)
+}
+
+// NotFoundHandler sets the handler to use if there's no match for a given path
+func (m *PathRecorderMux) NotFoundHandler(notFoundHandler http.Handler) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	m.notFoundHandler = notFoundHandler
+
+	m.refreshMuxLocked()
+}
+
+// Unregister removes a path from the mux.
+func (m *PathRecorderMux) Unregister(path string) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	delete(m.pathToHandler, path)
+	delete(m.prefixToHandler, path)
+	delete(m.pathStacks, path)
+	for i := range m.exposedPaths {
+		if m.exposedPaths[i] == path {
+			m.exposedPaths = append(m.exposedPaths[:i], m.exposedPaths[i+1:]...)
+			break
+		}
+	}
+
+	m.refreshMuxLocked()
+}
+
+// Handle registers the handler for the given pattern.
+// If a handler already exists for pattern, Handle panics.
+func (m *PathRecorderMux) Handle(path string, handler http.Handler) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	m.trackCallers(path)
+
+	m.exposedPaths = append(m.exposedPaths, path)
+	m.pathToHandler[path] = handler
+	m.refreshMuxLocked()
+}
+
+// HandleFunc registers the handler function for the given pattern.
+// If a handler already exists for pattern, Handle panics.
+func (m *PathRecorderMux) HandleFunc(path string, handler func(http.ResponseWriter, *http.Request)) {
+	m.Handle(path, http.HandlerFunc(handler))
+}
+
+// UnlistedHandle registers the handler for the given pattern, but doesn't list it.
+// If a handler already exists for pattern, Handle panics.
+func (m *PathRecorderMux) UnlistedHandle(path string, handler http.Handler) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	m.trackCallers(path)
+
+	m.pathToHandler[path] = handler
+	m.refreshMuxLocked()
+}
+
+// UnlistedHandleFunc registers the handler function for the given pattern, but doesn't list it.
+// If a handler already exists for pattern, Handle panics.
+func (m *PathRecorderMux) UnlistedHandleFunc(path string, handler func(http.ResponseWriter, *http.Request)) {
+	m.UnlistedHandle(path, http.HandlerFunc(handler))
+}
+
+// HandlePrefix is like Handle, but matches for anything under the path. Like a standard golang trailing slash.
+func (m *PathRecorderMux) HandlePrefix(path string, handler http.Handler) { + if !strings.HasSuffix(path, "/") { + panic(fmt.Sprintf("%q must end in a trailing slash", path)) + } + + m.lock.Lock() + defer m.lock.Unlock() + m.trackCallers(path) + + m.exposedPaths = append(m.exposedPaths, path) + m.prefixToHandler[path] = handler + m.refreshMuxLocked() +} + +// UnlistedHandlePrefix is like UnlistedHandle, but matches for anything under the path. Like a standard golang trailing slash. +func (m *PathRecorderMux) UnlistedHandlePrefix(path string, handler http.Handler) { + if !strings.HasSuffix(path, "/") { + panic(fmt.Sprintf("%q must end in a trailing slash", path)) + } + + m.lock.Lock() + defer m.lock.Unlock() + m.trackCallers(path) + + m.prefixToHandler[path] = handler + m.refreshMuxLocked() +} + +// ServeHTTP makes it an http.Handler +func (m *PathRecorderMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + m.mux.Load().(*pathHandler).ServeHTTP(w, r) +} + +// ServeHTTP makes it an http.Handler +func (h *pathHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if exactHandler, ok := h.pathToHandler[r.URL.Path]; ok { + klog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) + exactHandler.ServeHTTP(w, r) + return + } + + for _, prefixHandler := range h.prefixHandlers { + if strings.HasPrefix(r.URL.Path, prefixHandler.prefix) { + klog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) + prefixHandler.handler.ServeHTTP(w, r) + return + } + } + + klog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) + h.notFoundHandler.ServeHTTP(w, r) +} + +// byPrefixPriority sorts url prefixes by the order in which they should be tested by the mux +// this has to be sorted by most slashes then by length so that we can iterate straight +// through to match the "best" one first. +type byPrefixPriority []string + +func (s byPrefixPriority) Len() int { return len(s) } +func (s byPrefixPriority) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byPrefixPriority) Less(i, j int) bool { + lhsNumParts := strings.Count(s[i], "/") + rhsNumParts := strings.Count(s[j], "/") + if lhsNumParts != rhsNumParts { + return lhsNumParts < rhsNumParts + } + + lhsLen := len(s[i]) + rhsLen := len(s[j]) + if lhsLen != rhsLen { + return lhsLen < rhsLen + } + + return strings.Compare(s[i], s[j]) < 0 +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/OWNERS b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS new file mode 100644 index 0000000000..0e84109d7d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- nikhiljindal +- sttts +- jlowdermilk +- soltysh +- dims +- cjcullen +- ping035627 +- xiangpengzhao +- enj diff --git a/vendor/k8s.io/apiserver/pkg/server/options/admission.go b/vendor/k8s.io/apiserver/pkg/server/options/admission.go new file mode 100644 index 0000000000..765e2ad2b6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/admission.go @@ -0,0 +1,234 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "strings" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" + apiserverapi "k8s.io/apiserver/pkg/apis/apiserver" + apiserverapiv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" + apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/component-base/featuregate" +) + +var configScheme = runtime.NewScheme() + +func init() { + utilruntime.Must(apiserverapi.AddToScheme(configScheme)) + utilruntime.Must(apiserverapiv1alpha1.AddToScheme(configScheme)) + utilruntime.Must(apiserverapiv1.AddToScheme(configScheme)) +} + +// AdmissionOptions holds the admission options +type AdmissionOptions struct { + // RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default + RecommendedPluginOrder []string + // DefaultOffPlugins is a set of plugin names that is disabled by default + DefaultOffPlugins sets.String + + // EnablePlugins indicates plugins to be enabled passed through `--enable-admission-plugins`. + EnablePlugins []string + // DisablePlugins indicates plugins to be disabled passed through `--disable-admission-plugins`. + DisablePlugins []string + // ConfigFile is the file path with admission control configuration. + ConfigFile string + // Plugins contains all registered plugins. + Plugins *admission.Plugins + // Decorators is a list of admission decorator to wrap around the admission plugins + Decorators admission.Decorators +} + +// NewAdmissionOptions creates a new instance of AdmissionOptions +// Note: +// In addition it calls RegisterAllAdmissionPlugins to register +// all generic admission plugins. +// +// Provides the list of RecommendedPluginOrder that holds sane values +// that can be used by servers that don't care about admission chain. +// Servers that do care can overwrite/append that field after creation. +func NewAdmissionOptions() *AdmissionOptions { + options := &AdmissionOptions{ + Plugins: admission.NewPlugins(), + Decorators: admission.Decorators{admission.DecoratorFunc(admissionmetrics.WithControllerMetrics)}, + // This list is mix of mutating admission plugins and validating + // admission plugins. The apiserver always runs the validating ones + // after all the mutating ones, so their relative order in this list + // doesn't matter. 
+ RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, + DefaultOffPlugins: sets.NewString(), + } + server.RegisterAllAdmissionPlugins(options.Plugins) + return options +} + +// AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet +func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) { + if a == nil { + return + } + + fs.StringSliceVar(&a.EnablePlugins, "enable-admission-plugins", a.EnablePlugins, ""+ + "admission plugins that should be enabled in addition to default enabled ones ("+ + strings.Join(a.defaultEnabledPluginNames(), ", ")+"). "+ + "Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+ + "The order of plugins in this flag does not matter.") + fs.StringSliceVar(&a.DisablePlugins, "disable-admission-plugins", a.DisablePlugins, ""+ + "admission plugins that should be disabled although they are in the default enabled plugins list ("+ + strings.Join(a.defaultEnabledPluginNames(), ", ")+"). "+ + "Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+ + "The order of plugins in this flag does not matter.") + fs.StringVar(&a.ConfigFile, "admission-control-config-file", a.ConfigFile, + "File with admission control configuration.") +} + +// ApplyTo adds the admission chain to the server configuration. +// In case admission plugin names were not provided by a cluster-admin they will be prepared from the recommended/default values. +// In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers +// note this method uses: +// genericconfig.Authorizer +func (a *AdmissionOptions) ApplyTo( + c *server.Config, + informers informers.SharedInformerFactory, + kubeAPIServerClientConfig *rest.Config, + features featuregate.FeatureGate, + pluginInitializers ...admission.PluginInitializer, +) error { + if a == nil { + return nil + } + + // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. + if informers == nil { + return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil") + } + + pluginNames := a.enabledPluginNames() + + pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, configScheme) + if err != nil { + return fmt.Errorf("failed to read plugin config: %v", err) + } + + clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig) + if err != nil { + return err + } + genericInitializer := initializer.New(clientset, informers, c.Authorization.Authorizer, features) + initializersChain := admission.PluginInitializers{} + pluginInitializers = append(pluginInitializers, genericInitializer) + initializersChain = append(initializersChain, pluginInitializers...) + + admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain, a.Decorators) + if err != nil { + return err + } + + c.AdmissionControl = admissionmetrics.WithStepMetrics(admissionChain) + return nil +} + +// Validate verifies flags passed to AdmissionOptions. +func (a *AdmissionOptions) Validate() []error { + if a == nil { + return nil + } + + errs := []error{} + + registeredPlugins := sets.NewString(a.Plugins.Registered()...) 
+ for _, name := range a.EnablePlugins { + if !registeredPlugins.Has(name) { + errs = append(errs, fmt.Errorf("enable-admission-plugins plugin %q is unknown", name)) + } + } + + for _, name := range a.DisablePlugins { + if !registeredPlugins.Has(name) { + errs = append(errs, fmt.Errorf("disable-admission-plugins plugin %q is unknown", name)) + } + } + + enablePlugins := sets.NewString(a.EnablePlugins...) + disablePlugins := sets.NewString(a.DisablePlugins...) + if len(enablePlugins.Intersection(disablePlugins).List()) > 0 { + errs = append(errs, fmt.Errorf("%v in enable-admission-plugins and disable-admission-plugins "+ + "overlapped", enablePlugins.Intersection(disablePlugins).List())) + } + + // Verify RecommendedPluginOrder. + recommendPlugins := sets.NewString(a.RecommendedPluginOrder...) + intersections := registeredPlugins.Intersection(recommendPlugins) + if !intersections.Equal(recommendPlugins) { + // Developer error, this should never run in. + errs = append(errs, fmt.Errorf("plugins %v in RecommendedPluginOrder are not registered", + recommendPlugins.Difference(intersections).List())) + } + if !intersections.Equal(registeredPlugins) { + // Developer error, this should never run in. + errs = append(errs, fmt.Errorf("plugins %v registered are not in RecommendedPluginOrder", + registeredPlugins.Difference(intersections).List())) + } + + return errs +} + +// enabledPluginNames makes use of RecommendedPluginOrder, DefaultOffPlugins, +// EnablePlugins, DisablePlugins fields +// to prepare a list of ordered plugin names that are enabled. +func (a *AdmissionOptions) enabledPluginNames() []string { + allOffPlugins := append(a.DefaultOffPlugins.List(), a.DisablePlugins...) + disabledPlugins := sets.NewString(allOffPlugins...) + enabledPlugins := sets.NewString(a.EnablePlugins...) + disabledPlugins = disabledPlugins.Difference(enabledPlugins) + + orderedPlugins := []string{} + for _, plugin := range a.RecommendedPluginOrder { + if !disabledPlugins.Has(plugin) { + orderedPlugins = append(orderedPlugins, plugin) + } + } + + return orderedPlugins +} + +//Return names of plugins which are enabled by default +func (a *AdmissionOptions) defaultEnabledPluginNames() []string { + defaultOnPluginNames := []string{} + for _, pluginName := range a.RecommendedPluginOrder { + if !a.DefaultOffPlugins.Has(pluginName) { + defaultOnPluginNames = append(defaultOnPluginNames, pluginName) + } + } + + return defaultOnPluginNames +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go new file mode 100644 index 0000000000..794e89dedb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go @@ -0,0 +1,115 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + "strings" + + "github.com/spf13/pflag" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/resourceconfig" + serverstore "k8s.io/apiserver/pkg/server/storage" + cliflag "k8s.io/component-base/cli/flag" +) + +// APIEnablementOptions contains the options for which resources to turn on and off. +// Given small aggregated API servers, this option isn't required for "normal" API servers +type APIEnablementOptions struct { + RuntimeConfig cliflag.ConfigurationMap +} + +func NewAPIEnablementOptions() *APIEnablementOptions { + return &APIEnablementOptions{ + RuntimeConfig: make(cliflag.ConfigurationMap), + } +} + +// AddFlags adds flags for a specific APIServer to the specified FlagSet +func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) { + fs.Var(&s.RuntimeConfig, "runtime-config", ""+ + "A set of key=value pairs that enable or disable built-in APIs. Supported options are:\n"+ + "v1=true|false for the core API group\n"+ + "/=true|false for a specific API group and version (e.g. apps/v1=true)\n"+ + "api/all=true|false controls all API versions\n"+ + "api/ga=true|false controls all API versions of the form v[0-9]+\n"+ + "api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+\n"+ + "api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+\n"+ + "api/legacy is deprecated, and will be removed in a future version") +} + +// Validate validates RuntimeConfig with a list of registries. +// Usually this list only has one element, the apiserver registry of the process. +// But in the advanced (and usually not recommended) case of delegated apiservers there can be more. +// Validate will filter out the known groups of each registry. +// If anything is left over after that, an error is returned. +func (s *APIEnablementOptions) Validate(registries ...GroupRegisty) []error { + if s == nil { + return nil + } + + errors := []error{} + if s.RuntimeConfig[resourceconfig.APIAll] == "false" && len(s.RuntimeConfig) == 1 { + // Do not allow only set api/all=false, in such case apiserver startup has no meaning. + return append(errors, fmt.Errorf("invalid key with only %v=false", resourceconfig.APIAll)) + } + + groups, err := resourceconfig.ParseGroups(s.RuntimeConfig) + if err != nil { + return append(errors, err) + } + + for _, registry := range registries { + // filter out known groups + groups = unknownGroups(groups, registry) + } + if len(groups) != 0 { + errors = append(errors, fmt.Errorf("unknown api groups %s", strings.Join(groups, ","))) + } + + return errors +} + +// ApplyTo override MergedResourceConfig with defaults and registry +func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig *serverstore.ResourceConfig, registry resourceconfig.GroupVersionRegistry) error { + + if s == nil { + return nil + } + + mergedResourceConfig, err := resourceconfig.MergeAPIResourceConfigs(defaultResourceConfig, s.RuntimeConfig, registry) + c.MergedResourceConfig = mergedResourceConfig + + return err +} + +func unknownGroups(groups []string, registry GroupRegisty) []string { + unknownGroups := []string{} + for _, group := range groups { + if !registry.IsGroupRegistered(group) { + unknownGroups = append(unknownGroups, group) + } + } + return unknownGroups +} + +// GroupRegisty provides a method to check whether given group is registered. +type GroupRegisty interface { + // IsRegistered returns true if given group is registered. 
+ IsGroupRegistered(group string) bool +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/audit.go b/vendor/k8s.io/apiserver/pkg/server/options/audit.go new file mode 100644 index 0000000000..c3a709dcf1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/audit.go @@ -0,0 +1,607 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/spf13/pflag" + "gopkg.in/natefinch/lumberjack.v2" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" + auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/egressselector" + "k8s.io/apiserver/pkg/util/webhook" + pluginbuffered "k8s.io/apiserver/plugin/pkg/audit/buffered" + pluginlog "k8s.io/apiserver/plugin/pkg/audit/log" + plugintruncate "k8s.io/apiserver/plugin/pkg/audit/truncate" + pluginwebhook "k8s.io/apiserver/plugin/pkg/audit/webhook" +) + +const ( + // Default configuration values for ModeBatch. + defaultBatchBufferSize = 10000 // Buffer up to 10000 events before starting discarding. + // These batch parameters are only used by the webhook backend. + defaultBatchMaxSize = 400 // Only send up to 400 events at a time. + defaultBatchMaxWait = 30 * time.Second // Send events at least twice a minute. + defaultBatchThrottleQPS = 10 // Limit the send rate by 10 QPS. + defaultBatchThrottleBurst = 15 // Allow up to 15 QPS burst. +) + +func appendBackend(existing, newBackend audit.Backend) audit.Backend { + if existing == nil { + return newBackend + } + if newBackend == nil { + return existing + } + return audit.Union(existing, newBackend) +} + +type AuditOptions struct { + // Policy configuration file for filtering audit events that are captured. + // If unspecified, a default is provided. + PolicyFile string + + // Plugin options + LogOptions AuditLogOptions + WebhookOptions AuditWebhookOptions +} + +const ( + // ModeBatch indicates that the audit backend should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + ModeBatch = "batch" + // ModeBlocking causes the audit backend to block on every attempt to process + // a set of events. This causes requests to the API server to wait for the + // flush before sending a response. + ModeBlocking = "blocking" + // ModeBlockingStrict is the same as ModeBlocking, except when there is + // a failure during audit logging at RequestReceived stage, the whole + // request to apiserver will fail. + ModeBlockingStrict = "blocking-strict" +) + +// AllowedModes is the modes known for audit backends. 
+var AllowedModes = []string{ + ModeBatch, + ModeBlocking, + ModeBlockingStrict, +} + +type AuditBatchOptions struct { + // Should the backend asynchronous batch events to the webhook backend or + // should the backend block responses? + // + // Defaults to asynchronous batch events. + Mode string + // Configuration for batching backend. Only used in batch mode. + BatchConfig pluginbuffered.BatchConfig +} + +type AuditTruncateOptions struct { + // Whether truncating is enabled or not. + Enabled bool + + // Truncating configuration. + TruncateConfig plugintruncate.Config +} + +// AuditLogOptions determines the output of the structured audit log by default. +type AuditLogOptions struct { + Path string + MaxAge int + MaxBackups int + MaxSize int + Format string + Compress bool + + BatchOptions AuditBatchOptions + TruncateOptions AuditTruncateOptions + + // API group version used for serializing audit events. + GroupVersionString string +} + +// AuditWebhookOptions control the webhook configuration for audit events. +type AuditWebhookOptions struct { + ConfigFile string + InitialBackoff time.Duration + + BatchOptions AuditBatchOptions + TruncateOptions AuditTruncateOptions + + // API group version used for serializing audit events. + GroupVersionString string +} + +// AuditDynamicOptions control the configuration of dynamic backends for audit events +type AuditDynamicOptions struct { + // Enabled tells whether the dynamic audit capability is enabled. + Enabled bool + + // Configuration for batching backend. This is currently only used as an override + // for integration tests + BatchConfig *pluginbuffered.BatchConfig +} + +func NewAuditOptions() *AuditOptions { + return &AuditOptions{ + WebhookOptions: AuditWebhookOptions{ + InitialBackoff: pluginwebhook.DefaultInitialBackoffDelay, + BatchOptions: AuditBatchOptions{ + Mode: ModeBatch, + BatchConfig: defaultWebhookBatchConfig(), + }, + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", + }, + LogOptions: AuditLogOptions{ + Format: pluginlog.FormatJson, + BatchOptions: AuditBatchOptions{ + Mode: ModeBlocking, + BatchConfig: defaultLogBatchConfig(), + }, + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", + }, + } +} + +func NewAuditTruncateOptions() AuditTruncateOptions { + return AuditTruncateOptions{ + Enabled: false, + TruncateConfig: plugintruncate.Config{ + MaxBatchSize: 10 * 1024 * 1024, // 10MB + MaxEventSize: 100 * 1024, // 100KB + }, + } +} + +// Validate checks invalid config combination +func (o *AuditOptions) Validate() []error { + if o == nil { + return nil + } + + var allErrors []error + allErrors = append(allErrors, o.LogOptions.Validate()...) + allErrors = append(allErrors, o.WebhookOptions.Validate()...) + + return allErrors +} + +func validateBackendMode(pluginName string, mode string) error { + for _, m := range AllowedModes { + if m == mode { + return nil + } + } + return fmt.Errorf("invalid audit %s mode %s, allowed modes are %q", pluginName, mode, strings.Join(AllowedModes, ",")) +} + +func validateBackendBatchOptions(pluginName string, options AuditBatchOptions) error { + if err := validateBackendMode(pluginName, options.Mode); err != nil { + return err + } + if options.Mode != ModeBatch { + // Don't validate the unused options. 
+ return nil + } + config := options.BatchConfig + if config.BufferSize <= 0 { + return fmt.Errorf("invalid audit batch %s buffer size %v, must be a positive number", pluginName, config.BufferSize) + } + if config.MaxBatchSize <= 0 { + return fmt.Errorf("invalid audit batch %s max batch size %v, must be a positive number", pluginName, config.MaxBatchSize) + } + if config.ThrottleEnable { + if config.ThrottleQPS <= 0 { + return fmt.Errorf("invalid audit batch %s throttle QPS %v, must be a positive number", pluginName, config.ThrottleQPS) + } + if config.ThrottleBurst <= 0 { + return fmt.Errorf("invalid audit batch %s throttle burst %v, must be a positive number", pluginName, config.ThrottleBurst) + } + } + return nil +} + +var knownGroupVersions = []schema.GroupVersion{ + auditv1alpha1.SchemeGroupVersion, + auditv1beta1.SchemeGroupVersion, + auditv1.SchemeGroupVersion, +} + +func validateGroupVersionString(groupVersion string) error { + gv, err := schema.ParseGroupVersion(groupVersion) + if err != nil { + return err + } + if !knownGroupVersion(gv) { + return fmt.Errorf("invalid group version, allowed versions are %q", knownGroupVersions) + } + return nil +} + +func knownGroupVersion(gv schema.GroupVersion) bool { + for _, knownGv := range knownGroupVersions { + if gv == knownGv { + return true + } + } + return false +} + +func (o *AuditOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.PolicyFile, "audit-policy-file", o.PolicyFile, + "Path to the file that defines the audit policy configuration.") + + o.LogOptions.AddFlags(fs) + o.LogOptions.BatchOptions.AddFlags(pluginlog.PluginName, fs) + o.LogOptions.TruncateOptions.AddFlags(pluginlog.PluginName, fs) + o.WebhookOptions.AddFlags(fs) + o.WebhookOptions.BatchOptions.AddFlags(pluginwebhook.PluginName, fs) + o.WebhookOptions.TruncateOptions.AddFlags(pluginwebhook.PluginName, fs) +} + +func (o *AuditOptions) ApplyTo( + c *server.Config, +) error { + if o == nil { + return nil + } + if c == nil { + return fmt.Errorf("server config must be non-nil") + } + + // 1. Build policy checker + checker, err := o.newPolicyChecker() + if err != nil { + return err + } + + // 2. Build log backend + var logBackend audit.Backend + if w := o.LogOptions.getWriter(); w != nil { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for log backend") + } else { + logBackend = o.LogOptions.newBackend(w) + } + } + + // 3. Build webhook backend + var webhookBackend audit.Backend + if o.WebhookOptions.enabled() { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for webhook backend") + } else { + if c.EgressSelector != nil { + egressDialer, err := c.EgressSelector.Lookup(egressselector.ControlPlane.AsNetworkContext()) + if err != nil { + return err + } + webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(egressDialer) + } else { + webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(nil) + } + if err != nil { + return err + } + } + } + + groupVersion, err := schema.ParseGroupVersion(o.WebhookOptions.GroupVersionString) + if err != nil { + return err + } + + // 4. Apply dynamic options. + var dynamicBackend audit.Backend + if webhookBackend != nil { + // if only webhook is enabled wrap it in the truncate options + dynamicBackend = o.WebhookOptions.TruncateOptions.wrapBackend(webhookBackend, groupVersion) + } + + // 5. Set the policy checker + c.AuditPolicyChecker = checker + + // 6. 
Join the log backend with the webhooks + c.AuditBackend = appendBackend(logBackend, dynamicBackend) + + if c.AuditBackend != nil { + klog.V(2).Infof("Using audit backend: %s", c.AuditBackend) + } + return nil +} + +func (o *AuditOptions) newPolicyChecker() (policy.Checker, error) { + if o.PolicyFile == "" { + return nil, nil + } + + p, err := policy.LoadPolicyFromFile(o.PolicyFile) + if err != nil { + return nil, fmt.Errorf("loading audit policy file: %v", err) + } + return policy.NewChecker(p), nil +} + +func (o *AuditBatchOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { + fs.StringVar(&o.Mode, fmt.Sprintf("audit-%s-mode", pluginName), o.Mode, + "Strategy for sending audit events. Blocking indicates sending events should block"+ + " server responses. Batch causes the backend to buffer and write events"+ + " asynchronously. Known modes are "+strings.Join(AllowedModes, ",")+".") + fs.IntVar(&o.BatchConfig.BufferSize, fmt.Sprintf("audit-%s-batch-buffer-size", pluginName), + o.BatchConfig.BufferSize, "The size of the buffer to store events before "+ + "batching and writing. Only used in batch mode.") + fs.IntVar(&o.BatchConfig.MaxBatchSize, fmt.Sprintf("audit-%s-batch-max-size", pluginName), + o.BatchConfig.MaxBatchSize, "The maximum size of a batch. Only used in batch mode.") + fs.DurationVar(&o.BatchConfig.MaxBatchWait, fmt.Sprintf("audit-%s-batch-max-wait", pluginName), + o.BatchConfig.MaxBatchWait, "The amount of time to wait before force writing the "+ + "batch that hadn't reached the max size. Only used in batch mode.") + fs.BoolVar(&o.BatchConfig.ThrottleEnable, fmt.Sprintf("audit-%s-batch-throttle-enable", pluginName), + o.BatchConfig.ThrottleEnable, "Whether batching throttling is enabled. Only used in batch mode.") + fs.Float32Var(&o.BatchConfig.ThrottleQPS, fmt.Sprintf("audit-%s-batch-throttle-qps", pluginName), + o.BatchConfig.ThrottleQPS, "Maximum average number of batches per second. "+ + "Only used in batch mode.") + fs.IntVar(&o.BatchConfig.ThrottleBurst, fmt.Sprintf("audit-%s-batch-throttle-burst", pluginName), + o.BatchConfig.ThrottleBurst, "Maximum number of requests sent at the same "+ + "moment if ThrottleQPS was not utilized before. Only used in batch mode.") +} + +type ignoreErrorsBackend struct { + audit.Backend +} + +func (i *ignoreErrorsBackend) ProcessEvents(ev ...*auditinternal.Event) bool { + i.Backend.ProcessEvents(ev...) 
+ return true +} + +func (i *ignoreErrorsBackend) String() string { + return fmt.Sprintf("ignoreErrors<%s>", i.Backend) +} + +func (o *AuditBatchOptions) wrapBackend(delegate audit.Backend) audit.Backend { + if o.Mode == ModeBlockingStrict { + return delegate + } + if o.Mode == ModeBlocking { + return &ignoreErrorsBackend{Backend: delegate} + } + return pluginbuffered.NewBackend(delegate, o.BatchConfig) +} + +func (o *AuditTruncateOptions) Validate(pluginName string) error { + config := o.TruncateConfig + if config.MaxEventSize <= 0 { + return fmt.Errorf("invalid audit truncate %s max event size %v, must be a positive number", pluginName, config.MaxEventSize) + } + if config.MaxBatchSize < config.MaxEventSize { + return fmt.Errorf("invalid audit truncate %s max batch size %v, must be greater than "+ + "max event size (%v)", pluginName, config.MaxBatchSize, config.MaxEventSize) + } + return nil +} + +func (o *AuditTruncateOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { + fs.BoolVar(&o.Enabled, fmt.Sprintf("audit-%s-truncate-enabled", pluginName), + o.Enabled, "Whether event and batch truncating is enabled.") + fs.Int64Var(&o.TruncateConfig.MaxBatchSize, fmt.Sprintf("audit-%s-truncate-max-batch-size", pluginName), + o.TruncateConfig.MaxBatchSize, "Maximum size of the batch sent to the underlying backend. "+ + "Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, "+ + "it is split into several batches of smaller size.") + fs.Int64Var(&o.TruncateConfig.MaxEventSize, fmt.Sprintf("audit-%s-truncate-max-event-size", pluginName), + o.TruncateConfig.MaxEventSize, "Maximum size of the audit event sent to the underlying backend. "+ + "If the size of an event is greater than this number, first request and response are removed, and "+ + "if this doesn't reduce the size enough, event is discarded.") +} + +func (o *AuditTruncateOptions) wrapBackend(delegate audit.Backend, gv schema.GroupVersion) audit.Backend { + if !o.Enabled { + return delegate + } + return plugintruncate.NewBackend(delegate, o.TruncateConfig, gv) +} + +func (o *AuditLogOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.Path, "audit-log-path", o.Path, + "If set, all requests coming to the apiserver will be logged to this file. '-' means standard out.") + fs.IntVar(&o.MaxAge, "audit-log-maxage", o.MaxAge, + "The maximum number of days to retain old audit log files based on the timestamp encoded in their filename.") + fs.IntVar(&o.MaxBackups, "audit-log-maxbackup", o.MaxBackups, + "The maximum number of old audit log files to retain.") + fs.IntVar(&o.MaxSize, "audit-log-maxsize", o.MaxSize, + "The maximum size in megabytes of the audit log file before it gets rotated.") + fs.StringVar(&o.Format, "audit-log-format", o.Format, + "Format of saved audits. \"legacy\" indicates 1-line text format for each event."+ + " \"json\" indicates structured json format. Known formats are "+ + strings.Join(pluginlog.AllowedFormats, ",")+".") + fs.StringVar(&o.GroupVersionString, "audit-log-version", o.GroupVersionString, + "API group and version used for serializing audit events written to log.") + fs.BoolVar(&o.Compress, "audit-log-compress", o.Compress, "If set, the rotated log files will be compressed using gzip.") +} + +func (o *AuditLogOptions) Validate() []error { + // Check whether the log backend is enabled based on the options. 
+ if !o.enabled() { + return nil + } + + var allErrors []error + + if err := validateBackendBatchOptions(pluginlog.PluginName, o.BatchOptions); err != nil { + allErrors = append(allErrors, err) + } + if err := o.TruncateOptions.Validate(pluginlog.PluginName); err != nil { + allErrors = append(allErrors, err) + } + + if err := validateGroupVersionString(o.GroupVersionString); err != nil { + allErrors = append(allErrors, err) + } + + // Check log format + validFormat := false + for _, f := range pluginlog.AllowedFormats { + if f == o.Format { + validFormat = true + break + } + } + if !validFormat { + allErrors = append(allErrors, fmt.Errorf("invalid audit log format %s, allowed formats are %q", o.Format, strings.Join(pluginlog.AllowedFormats, ","))) + } + + // Check validities of MaxAge, MaxBackups and MaxSize of log options, if file log backend is enabled. + if o.MaxAge < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxage %v can't be a negative number", o.MaxAge)) + } + if o.MaxBackups < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxbackup %v can't be a negative number", o.MaxBackups)) + } + if o.MaxSize < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxsize %v can't be a negative number", o.MaxSize)) + } + + return allErrors +} + +// Check whether the log backend is enabled based on the options. +func (o *AuditLogOptions) enabled() bool { + return o != nil && o.Path != "" +} + +func (o *AuditLogOptions) getWriter() io.Writer { + if !o.enabled() { + return nil + } + + var w io.Writer = os.Stdout + if o.Path != "-" { + w = &lumberjack.Logger{ + Filename: o.Path, + MaxAge: o.MaxAge, + MaxBackups: o.MaxBackups, + MaxSize: o.MaxSize, + Compress: o.Compress, + } + } + return w +} + +func (o *AuditLogOptions) newBackend(w io.Writer) audit.Backend { + groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) + log := pluginlog.NewBackend(w, o.Format, groupVersion) + log = o.BatchOptions.wrapBackend(log) + log = o.TruncateOptions.wrapBackend(log, groupVersion) + return log +} + +func (o *AuditWebhookOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.ConfigFile, "audit-webhook-config-file", o.ConfigFile, + "Path to a kubeconfig formatted file that defines the audit webhook configuration.") + fs.DurationVar(&o.InitialBackoff, "audit-webhook-initial-backoff", + o.InitialBackoff, "The amount of time to wait before retrying the first failed request.") + fs.DurationVar(&o.InitialBackoff, "audit-webhook-batch-initial-backoff", + o.InitialBackoff, "The amount of time to wait before retrying the first failed request.") + fs.MarkDeprecated("audit-webhook-batch-initial-backoff", + "Deprecated, use --audit-webhook-initial-backoff instead.") + fs.StringVar(&o.GroupVersionString, "audit-webhook-version", o.GroupVersionString, + "API group and version used for serializing audit events written to webhook.") +} + +func (o *AuditWebhookOptions) Validate() []error { + if !o.enabled() { + return nil + } + + var allErrors []error + if err := validateBackendBatchOptions(pluginwebhook.PluginName, o.BatchOptions); err != nil { + allErrors = append(allErrors, err) + } + if err := o.TruncateOptions.Validate(pluginwebhook.PluginName); err != nil { + allErrors = append(allErrors, err) + } + + if err := validateGroupVersionString(o.GroupVersionString); err != nil { + allErrors = append(allErrors, err) + } + return allErrors +} + +func (o *AuditWebhookOptions) enabled() bool { + return o != nil && o.ConfigFile != "" +} + +// newUntruncatedBackend returns a webhook 
backend without the truncate options applied +// this is done so that the same trucate backend can wrap both the webhook and dynamic backends +func (o *AuditWebhookOptions) newUntruncatedBackend(customDial utilnet.DialFunc) (audit.Backend, error) { + groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) + webhook, err := pluginwebhook.NewBackend(o.ConfigFile, groupVersion, webhook.DefaultRetryBackoffWithInitialDelay(o.InitialBackoff), customDial) + if err != nil { + return nil, fmt.Errorf("initializing audit webhook: %v", err) + } + webhook = o.BatchOptions.wrapBackend(webhook) + return webhook, nil +} + +// defaultWebhookBatchConfig returns the default BatchConfig used by the Webhook backend. +func defaultWebhookBatchConfig() pluginbuffered.BatchConfig { + return pluginbuffered.BatchConfig{ + BufferSize: defaultBatchBufferSize, + MaxBatchSize: defaultBatchMaxSize, + MaxBatchWait: defaultBatchMaxWait, + + ThrottleEnable: true, + ThrottleQPS: defaultBatchThrottleQPS, + ThrottleBurst: defaultBatchThrottleBurst, + + AsyncDelegate: true, + } +} + +// defaultLogBatchConfig returns the default BatchConfig used by the Log backend. +func defaultLogBatchConfig() pluginbuffered.BatchConfig { + return pluginbuffered.BatchConfig{ + BufferSize: defaultBatchBufferSize, + // Batching is not useful for the log-file backend. + // MaxBatchWait ignored. + MaxBatchSize: 1, + ThrottleEnable: false, + // Asynchronous log threads just create lock contention. + AsyncDelegate: false, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go new file mode 100644 index 0000000000..e266fb73ef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go @@ -0,0 +1,421 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "strings" + "time" + + "k8s.io/apiserver/pkg/server/dynamiccertificates" + + "github.com/spf13/pflag" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + "k8s.io/apiserver/pkg/authentication/request/headerrequest" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2" + openapicommon "k8s.io/kube-openapi/pkg/common" +) + +// DefaultAuthWebhookRetryBackoff is the default backoff parameters for +// both authentication and authorization webhook used by the apiserver. +func DefaultAuthWebhookRetryBackoff() *wait.Backoff { + return &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + } +} + +type RequestHeaderAuthenticationOptions struct { + // ClientCAFile is the root certificate bundle to verify client certificates on incoming requests + // before trusting usernames in headers. 
+ ClientCAFile string + + UsernameHeaders []string + GroupHeaders []string + ExtraHeaderPrefixes []string + AllowedNames []string +} + +func (s *RequestHeaderAuthenticationOptions) Validate() []error { + allErrors := []error{} + + if err := checkForWhiteSpaceOnly("requestheader-username-headers", s.UsernameHeaders...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-group-headers", s.GroupHeaders...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-allowed-names", s.AllowedNames...); err != nil { + allErrors = append(allErrors, err) + } + + return allErrors +} + +func checkForWhiteSpaceOnly(flag string, headerNames ...string) error { + for _, headerName := range headerNames { + if len(strings.TrimSpace(headerName)) == 0 { + return fmt.Errorf("empty value in %q", flag) + } + } + + return nil +} + +func (s *RequestHeaderAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.StringSliceVar(&s.UsernameHeaders, "requestheader-username-headers", s.UsernameHeaders, ""+ + "List of request headers to inspect for usernames. X-Remote-User is common.") + + fs.StringSliceVar(&s.GroupHeaders, "requestheader-group-headers", s.GroupHeaders, ""+ + "List of request headers to inspect for groups. X-Remote-Group is suggested.") + + fs.StringSliceVar(&s.ExtraHeaderPrefixes, "requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes, ""+ + "List of request header prefixes to inspect. X-Remote-Extra- is suggested.") + + fs.StringVar(&s.ClientCAFile, "requestheader-client-ca-file", s.ClientCAFile, ""+ + "Root certificate bundle to use to verify client certificates on incoming requests "+ + "before trusting usernames in headers specified by --requestheader-username-headers. "+ + "WARNING: generally do not depend on authorization being already done for incoming requests.") + + fs.StringSliceVar(&s.AllowedNames, "requestheader-allowed-names", s.AllowedNames, ""+ + "List of client certificate common names to allow to provide usernames in headers "+ + "specified by --requestheader-username-headers. If empty, any client certificate validated "+ + "by the authorities in --requestheader-client-ca-file is allowed.") +} + +// ToAuthenticationRequestHeaderConfig returns a RequestHeaderConfig config object for these options +// if necessary, nil otherwise. +func (s *RequestHeaderAuthenticationOptions) ToAuthenticationRequestHeaderConfig() (*authenticatorfactory.RequestHeaderConfig, error) { + if len(s.ClientCAFile) == 0 { + return nil, nil + } + + caBundleProvider, err := dynamiccertificates.NewDynamicCAContentFromFile("request-header", s.ClientCAFile) + if err != nil { + return nil, err + } + + return &authenticatorfactory.RequestHeaderConfig{ + UsernameHeaders: headerrequest.StaticStringSlice(s.UsernameHeaders), + GroupHeaders: headerrequest.StaticStringSlice(s.GroupHeaders), + ExtraHeaderPrefixes: headerrequest.StaticStringSlice(s.ExtraHeaderPrefixes), + CAContentProvider: caBundleProvider, + AllowedClientNames: headerrequest.StaticStringSlice(s.AllowedNames), + }, nil +} + +// ClientCertAuthenticationOptions provides different options for client cert auth. You should use `GetClientVerifyOptionFn` to +// get the verify options for your authenticator. 
+type ClientCertAuthenticationOptions struct { + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + ClientCA string + + // CAContentProvider are the options for verifying incoming connections using mTLS and directly assigning to users. + // Generally this is the CA bundle file used to authenticate client certificates + // If non-nil, this takes priority over the ClientCA file. + CAContentProvider dynamiccertificates.CAContentProvider +} + +// GetClientVerifyOptionFn provides verify options for your authenticator while respecting the preferred order of verifiers. +func (s *ClientCertAuthenticationOptions) GetClientCAContentProvider() (dynamiccertificates.CAContentProvider, error) { + if s.CAContentProvider != nil { + return s.CAContentProvider, nil + } + + if len(s.ClientCA) == 0 { + return nil, nil + } + + return dynamiccertificates.NewDynamicCAContentFromFile("client-ca-bundle", s.ClientCA) +} + +func (s *ClientCertAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&s.ClientCA, "client-ca-file", s.ClientCA, ""+ + "If set, any request presenting a client certificate signed by one of "+ + "the authorities in the client-ca-file is authenticated with an identity "+ + "corresponding to the CommonName of the client certificate.") +} + +// DelegatingAuthenticationOptions provides an easy way for composing API servers to delegate their authentication to +// the root kube API server. The API federator will act as +// a front proxy and direction connections will be able to delegate to the core kube API server +type DelegatingAuthenticationOptions struct { + // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the + // TokenAccessReview.authentication.k8s.io endpoint for checking tokens. + RemoteKubeConfigFile string + // RemoteKubeConfigFileOptional is specifying whether not specifying the kubeconfig or + // a missing in-cluster config will be fatal. + RemoteKubeConfigFileOptional bool + + // CacheTTL is the length of time that a token authentication answer will be cached. + CacheTTL time.Duration + + ClientCert ClientCertAuthenticationOptions + RequestHeader RequestHeaderAuthenticationOptions + + // SkipInClusterLookup indicates missing authentication configuration should not be retrieved from the cluster configmap + SkipInClusterLookup bool + + // TolerateInClusterLookupFailure indicates failures to look up authentication configuration from the cluster configmap should not be fatal. + // Setting this can result in an authenticator that will reject all requests. + TolerateInClusterLookupFailure bool + + // WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff + + // ClientTimeout specifies a time limit for requests made by the authorization webhook client. + // The default value is set to 10 seconds. 
+ ClientTimeout time.Duration +} + +func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions { + return &DelegatingAuthenticationOptions{ + // very low for responsiveness, but high enough to handle storms + CacheTTL: 10 * time.Second, + ClientCert: ClientCertAuthenticationOptions{}, + RequestHeader: RequestHeaderAuthenticationOptions{ + UsernameHeaders: []string{"x-remote-user"}, + GroupHeaders: []string{"x-remote-group"}, + ExtraHeaderPrefixes: []string{"x-remote-extra-"}, + }, + WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(), + ClientTimeout: 10 * time.Second, + } +} + +// WithCustomRetryBackoff sets the custom backoff parameters for the authentication webhook retry logic. +func (s *DelegatingAuthenticationOptions) WithCustomRetryBackoff(backoff wait.Backoff) { + s.WebhookRetryBackoff = &backoff +} + +// WithClientTimeout sets the given timeout for the authentication webhook client. +func (s *DelegatingAuthenticationOptions) WithClientTimeout(timeout time.Duration) { + s.ClientTimeout = timeout +} + +func (s *DelegatingAuthenticationOptions) Validate() []error { + allErrors := []error{} + allErrors = append(allErrors, s.RequestHeader.Validate()...) + + if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 { + allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 1, but is: %d", s.WebhookRetryBackoff.Steps)) + } + + return allErrors +} + +func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + var optionalKubeConfigSentence string + if s.RemoteKubeConfigFileOptional { + optionalKubeConfigSentence = " This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster." + } + fs.StringVar(&s.RemoteKubeConfigFile, "authentication-kubeconfig", s.RemoteKubeConfigFile, ""+ + "kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+ + "tokenreviews.authentication.k8s.io."+optionalKubeConfigSentence) + + fs.DurationVar(&s.CacheTTL, "authentication-token-webhook-cache-ttl", s.CacheTTL, + "The duration to cache responses from the webhook token authenticator.") + + s.ClientCert.AddFlags(fs) + s.RequestHeader.AddFlags(fs) + + fs.BoolVar(&s.SkipInClusterLookup, "authentication-skip-lookup", s.SkipInClusterLookup, ""+ + "If false, the authentication-kubeconfig will be used to lookup missing authentication "+ + "configuration from the cluster.") + fs.BoolVar(&s.TolerateInClusterLookupFailure, "authentication-tolerate-lookup-failure", s.TolerateInClusterLookupFailure, ""+ + "If true, failures to look up missing authentication configuration from the cluster are not considered fatal. 
"+ + "Note that this can result in authentication that treats all requests as anonymous.") +} + +func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.AuthenticationInfo, servingInfo *server.SecureServingInfo, openAPIConfig *openapicommon.Config) error { + if s == nil { + authenticationInfo.Authenticator = nil + return nil + } + + cfg := authenticatorfactory.DelegatingAuthenticatorConfig{ + Anonymous: true, + CacheTTL: s.CacheTTL, + WebhookRetryBackoff: s.WebhookRetryBackoff, + } + + client, err := s.getClient() + if err != nil { + return fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err) + } + + // configure token review + if client != nil { + cfg.TokenAccessReviewClient = client.AuthenticationV1().TokenReviews() + } + + // get the clientCA information + clientCAFileSpecified := len(s.ClientCert.ClientCA) > 0 + var clientCAProvider dynamiccertificates.CAContentProvider + if clientCAFileSpecified { + clientCAProvider, err = s.ClientCert.GetClientCAContentProvider() + if err != nil { + return fmt.Errorf("unable to load client CA file %q: %v", s.ClientCert.ClientCA, err) + } + cfg.ClientCertificateCAContentProvider = clientCAProvider + if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to assign client CA file: %v", err) + } + + } else if !s.SkipInClusterLookup { + if client == nil { + klog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + } else { + clientCAProvider, err = dynamiccertificates.NewDynamicCAFromConfigMapController("client-ca", authenticationConfigMapNamespace, authenticationConfigMapName, "client-ca-file", client) + if err != nil { + return fmt.Errorf("unable to load configmap based client CA file: %v", err) + } + cfg.ClientCertificateCAContentProvider = clientCAProvider + if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to assign configmap based client CA file: %v", err) + } + + } + } + + requestHeaderCAFileSpecified := len(s.RequestHeader.ClientCAFile) > 0 + var requestHeaderConfig *authenticatorfactory.RequestHeaderConfig + if requestHeaderCAFileSpecified { + requestHeaderConfig, err = s.RequestHeader.ToAuthenticationRequestHeaderConfig() + if err != nil { + return fmt.Errorf("unable to create request header authentication config: %v", err) + } + + } else if !s.SkipInClusterLookup { + if client == nil { + klog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + } else { + requestHeaderConfig, err = s.createRequestHeaderConfig(client) + if err != nil { + if s.TolerateInClusterLookupFailure { + klog.Warningf("Error looking up in-cluster authentication configuration: %v", err) + klog.Warningf("Continuing without authentication configuration. 
This may treat all requests as anonymous.") + klog.Warningf("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false") + } else { + return fmt.Errorf("unable to load configmap based request-header-client-ca-file: %v", err) + } + } + } + } + if requestHeaderConfig != nil { + cfg.RequestHeaderConfig = requestHeaderConfig + if err = authenticationInfo.ApplyClientCert(cfg.RequestHeaderConfig.CAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to load request-header-client-ca-file: %v", err) + } + } + + // create authenticator + authenticator, securityDefinitions, err := cfg.New() + if err != nil { + return err + } + authenticationInfo.Authenticator = authenticator + if openAPIConfig != nil { + openAPIConfig.SecurityDefinitions = securityDefinitions + } + + return nil +} + +const ( + authenticationConfigMapNamespace = metav1.NamespaceSystem + // authenticationConfigMapName is the name of ConfigMap in the kube-system namespace holding the root certificate + // bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified + // by --requestheader-username-headers. This is created in the cluster by the kube-apiserver. + // "WARNING: generally do not depend on authorization being already done for incoming requests.") + authenticationConfigMapName = "extension-apiserver-authentication" +) + +func (s *DelegatingAuthenticationOptions) createRequestHeaderConfig(client kubernetes.Interface) (*authenticatorfactory.RequestHeaderConfig, error) { + dynamicRequestHeaderProvider, err := newDynamicRequestHeaderController(client) + if err != nil { + return nil, fmt.Errorf("unable to create request header authentication config: %v", err) + } + + // look up authentication configuration in the cluster and in case of an err defer to authentication-tolerate-lookup-failure flag + if err := dynamicRequestHeaderProvider.RunOnce(); err != nil { + return nil, err + } + + return &authenticatorfactory.RequestHeaderConfig{ + CAContentProvider: dynamicRequestHeaderProvider, + UsernameHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.UsernameHeaders)), + GroupHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.GroupHeaders)), + ExtraHeaderPrefixes: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.ExtraHeaderPrefixes)), + AllowedClientNames: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.AllowedClientNames)), + }, nil +} + +// getClient returns a Kubernetes clientset. If s.RemoteKubeConfigFileOptional is true, nil will be returned +// if no kubeconfig is specified by the user and the in-cluster config is not found. +func (s *DelegatingAuthenticationOptions) getClient() (kubernetes.Interface, error) { + var clientConfig *rest.Config + var err error + if len(s.RemoteKubeConfigFile) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err = loader.ClientConfig() + } else { + // without the remote kubeconfig file, try to use the in-cluster config. Most addon API servers will + // use this path. If it is optional, ignore errors. 
+ clientConfig, err = rest.InClusterConfig() + if err != nil && s.RemoteKubeConfigFileOptional { + if err != rest.ErrNotInCluster { + klog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err) + } + return nil, nil + } + } + if err != nil { + return nil, fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err) + } + + // set high qps/burst limits since this will effectively limit API server responsiveness + clientConfig.QPS = 200 + clientConfig.Burst = 400 + clientConfig.Timeout = s.ClientTimeout + + return kubernetes.NewForConfig(clientConfig) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go new file mode 100644 index 0000000000..5c558b06d6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go @@ -0,0 +1,79 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/authentication/request/headerrequest" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/client-go/kubernetes" +) + +var _ dynamiccertificates.ControllerRunner = &DynamicRequestHeaderController{} +var _ dynamiccertificates.Notifier = &DynamicRequestHeaderController{} +var _ dynamiccertificates.CAContentProvider = &DynamicRequestHeaderController{} + +var _ headerrequest.RequestHeaderAuthRequestProvider = &DynamicRequestHeaderController{} + +// DynamicRequestHeaderController combines DynamicCAFromConfigMapController and RequestHeaderAuthRequestController +// into one controller for dynamically filling RequestHeaderConfig struct +type DynamicRequestHeaderController struct { + *dynamiccertificates.ConfigMapCAController + *headerrequest.RequestHeaderAuthRequestController +} + +// newDynamicRequestHeaderController creates a new controller that implements DynamicRequestHeaderController +func newDynamicRequestHeaderController(client kubernetes.Interface) (*DynamicRequestHeaderController, error) { + requestHeaderCAController, err := dynamiccertificates.NewDynamicCAFromConfigMapController( + "client-ca", + authenticationConfigMapNamespace, + authenticationConfigMapName, + "requestheader-client-ca-file", + client) + if err != nil { + return nil, fmt.Errorf("unable to create DynamicCAFromConfigMap controller: %v", err) + } + + requestHeaderAuthRequestController := headerrequest.NewRequestHeaderAuthRequestController( + authenticationConfigMapName, + authenticationConfigMapNamespace, + client, + "requestheader-username-headers", + "requestheader-group-headers", + "requestheader-extra-headers-prefix", + "requestheader-allowed-names", + ) + return &DynamicRequestHeaderController{ + ConfigMapCAController: requestHeaderCAController, + RequestHeaderAuthRequestController: requestHeaderAuthRequestController, + }, nil +} + +func (c *DynamicRequestHeaderController) RunOnce() error { + errs 
:= []error{} + errs = append(errs, c.ConfigMapCAController.RunOnce()) + errs = append(errs, c.RequestHeaderAuthRequestController.RunOnce()) + return errors.NewAggregate(errs) +} + +func (c *DynamicRequestHeaderController) Run(workers int, stopCh <-chan struct{}) { + go c.ConfigMapCAController.Run(workers, stopCh) + go c.RequestHeaderAuthRequestController.Run(workers, stopCh) + <-stopCh +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authorization.go b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go new file mode 100644 index 0000000000..04523e8f29 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go @@ -0,0 +1,220 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "time" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + "k8s.io/apiserver/pkg/authorization/path" + "k8s.io/apiserver/pkg/authorization/union" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to +// the root kube API server. +// WARNING: never assume that every authenticated incoming request already does authorization. +// The aggregator in the kube API server does this today, but this behaviour is not +// guaranteed in the future. +type DelegatingAuthorizationOptions struct { + // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the + // SubjectAccessReview.authorization.k8s.io endpoint for checking tokens. + RemoteKubeConfigFile string + // RemoteKubeConfigFileOptional is specifying whether not specifying the kubeconfig or + // a missing in-cluster config will be fatal. + RemoteKubeConfigFileOptional bool + + // AllowCacheTTL is the length of time that a successful authorization response will be cached + AllowCacheTTL time.Duration + + // DenyCacheTTL is the length of time that an unsuccessful authorization response will be cached. + // You generally want more responsive, "deny, try again" flows. + DenyCacheTTL time.Duration + + // AlwaysAllowPaths are HTTP paths which are excluded from authorization. They can be plain + // paths or end in * in which case prefix-match is applied. A leading / is optional. + AlwaysAllowPaths []string + + // AlwaysAllowGroups are groups which are allowed to take any actions. In kube, this is system:masters. + AlwaysAllowGroups []string + + // ClientTimeout specifies a time limit for requests made by SubjectAccessReviews client. + // The default value is set to 10 seconds. + ClientTimeout time.Duration + + // WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic. 
+ // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff +} + +func NewDelegatingAuthorizationOptions() *DelegatingAuthorizationOptions { + return &DelegatingAuthorizationOptions{ + // very low for responsiveness, but high enough to handle storms + AllowCacheTTL: 10 * time.Second, + DenyCacheTTL: 10 * time.Second, + ClientTimeout: 10 * time.Second, + WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(), + } +} + +// WithAlwaysAllowGroups appends the list of paths to AlwaysAllowGroups +func (s *DelegatingAuthorizationOptions) WithAlwaysAllowGroups(groups ...string) *DelegatingAuthorizationOptions { + s.AlwaysAllowGroups = append(s.AlwaysAllowGroups, groups...) + return s +} + +// WithAlwaysAllowPaths appends the list of paths to AlwaysAllowPaths +func (s *DelegatingAuthorizationOptions) WithAlwaysAllowPaths(paths ...string) *DelegatingAuthorizationOptions { + s.AlwaysAllowPaths = append(s.AlwaysAllowPaths, paths...) + return s +} + +// WithClientTimeout sets the given timeout for SAR client used by this authorizer +func (s *DelegatingAuthorizationOptions) WithClientTimeout(timeout time.Duration) { + s.ClientTimeout = timeout +} + +// WithCustomRetryBackoff sets the custom backoff parameters for the authorization webhook retry logic. +func (s *DelegatingAuthorizationOptions) WithCustomRetryBackoff(backoff wait.Backoff) { + s.WebhookRetryBackoff = &backoff +} + +func (s *DelegatingAuthorizationOptions) Validate() []error { + allErrors := []error{} + + if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 { + allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 1, but is: %d", s.WebhookRetryBackoff.Steps)) + } + + return allErrors +} + +func (s *DelegatingAuthorizationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + var optionalKubeConfigSentence string + if s.RemoteKubeConfigFileOptional { + optionalKubeConfigSentence = " This is optional. If empty, all requests not skipped by authorization are forbidden." + } + fs.StringVar(&s.RemoteKubeConfigFile, "authorization-kubeconfig", s.RemoteKubeConfigFile, + "kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+ + "subjectaccessreviews.authorization.k8s.io."+optionalKubeConfigSentence) + + fs.DurationVar(&s.AllowCacheTTL, "authorization-webhook-cache-authorized-ttl", + s.AllowCacheTTL, + "The duration to cache 'authorized' responses from the webhook authorizer.") + + fs.DurationVar(&s.DenyCacheTTL, + "authorization-webhook-cache-unauthorized-ttl", s.DenyCacheTTL, + "The duration to cache 'unauthorized' responses from the webhook authorizer.") + + fs.StringSliceVar(&s.AlwaysAllowPaths, "authorization-always-allow-paths", s.AlwaysAllowPaths, + "A list of HTTP paths to skip during authorization, i.e. 
these are authorized without "+ + "contacting the 'core' kubernetes server.") +} + +func (s *DelegatingAuthorizationOptions) ApplyTo(c *server.AuthorizationInfo) error { + if s == nil { + c.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() + return nil + } + + client, err := s.getClient() + if err != nil { + return err + } + + c.Authorizer, err = s.toAuthorizer(client) + return err +} + +func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interface) (authorizer.Authorizer, error) { + var authorizers []authorizer.Authorizer + + if len(s.AlwaysAllowGroups) > 0 { + authorizers = append(authorizers, authorizerfactory.NewPrivilegedGroups(s.AlwaysAllowGroups...)) + } + + if len(s.AlwaysAllowPaths) > 0 { + a, err := path.NewAuthorizer(s.AlwaysAllowPaths) + if err != nil { + return nil, err + } + authorizers = append(authorizers, a) + } + + if client == nil { + klog.Warningf("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.") + } else { + cfg := authorizerfactory.DelegatingAuthorizerConfig{ + SubjectAccessReviewClient: client.AuthorizationV1().SubjectAccessReviews(), + AllowCacheTTL: s.AllowCacheTTL, + DenyCacheTTL: s.DenyCacheTTL, + WebhookRetryBackoff: s.WebhookRetryBackoff, + } + delegatedAuthorizer, err := cfg.New() + if err != nil { + return nil, err + } + authorizers = append(authorizers, delegatedAuthorizer) + } + + return union.New(authorizers...), nil +} + +func (s *DelegatingAuthorizationOptions) getClient() (kubernetes.Interface, error) { + var clientConfig *rest.Config + var err error + if len(s.RemoteKubeConfigFile) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err = loader.ClientConfig() + } else { + // without the remote kubeconfig file, try to use the in-cluster config. Most addon API servers will + // use this path. If it is optional, ignore errors. + clientConfig, err = rest.InClusterConfig() + if err != nil && s.RemoteKubeConfigFileOptional { + if err != rest.ErrNotInCluster { + klog.Warningf("failed to read in-cluster kubeconfig for delegated authorization: %v", err) + } + return nil, nil + } + } + if err != nil { + return nil, fmt.Errorf("failed to get delegated authorization kubeconfig: %v", err) + } + + // set high qps/burst limits since this will effectively limit API server responsiveness + clientConfig.QPS = 200 + clientConfig.Burst = 400 + clientConfig.Timeout = s.ClientTimeout + + return kubernetes.NewForConfig(clientConfig) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go new file mode 100644 index 0000000000..d46dece4a7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + "time" + + "github.com/spf13/pflag" + "k8s.io/apiserver/pkg/server" + clientgoinformers "k8s.io/client-go/informers" + clientgoclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// CoreAPIOptions contains options to configure the connection to a core API Kubernetes apiserver. +type CoreAPIOptions struct { + // CoreAPIKubeconfigPath is a filename for a kubeconfig file to contact the core API server with. + // If it is not set, the in cluster config is used. + CoreAPIKubeconfigPath string +} + +func NewCoreAPIOptions() *CoreAPIOptions { + return &CoreAPIOptions{} +} + +func (o *CoreAPIOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.CoreAPIKubeconfigPath, "kubeconfig", o.CoreAPIKubeconfigPath, + "kubeconfig file pointing at the 'core' kubernetes server.") +} + +func (o *CoreAPIOptions) ApplyTo(config *server.RecommendedConfig) error { + if o == nil { + return nil + } + + // create shared informer for Kubernetes APIs + var kubeconfig *rest.Config + var err error + if len(o.CoreAPIKubeconfigPath) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: o.CoreAPIKubeconfigPath} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + kubeconfig, err = loader.ClientConfig() + if err != nil { + return fmt.Errorf("failed to load kubeconfig at %q: %v", o.CoreAPIKubeconfigPath, err) + } + } else { + kubeconfig, err = rest.InClusterConfig() + if err != nil { + return err + } + } + clientgoExternalClient, err := clientgoclientset.NewForConfig(kubeconfig) + if err != nil { + return fmt.Errorf("failed to create Kubernetes clientset: %v", err) + } + config.ClientConfig = kubeconfig + config.SharedInformerFactory = clientgoinformers.NewSharedInformerFactory(clientgoExternalClient, 10*time.Minute) + + return nil +} + +func (o *CoreAPIOptions) Validate() []error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go new file mode 100644 index 0000000000..1c066313c2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go @@ -0,0 +1,169 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net" + + "github.com/spf13/pflag" + + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/rest" +) + +// DeprecatedInsecureServingOptions are for creating an unauthenticated, unauthorized, insecure port. +// No one should be using these anymore. +// DEPRECATED: all insecure serving options are removed in a future version +type DeprecatedInsecureServingOptions struct { + BindAddress net.IP + BindPort int + // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", + // "tcp4", and "tcp6". + BindNetwork string + + // Listener is the secure server network listener. 
+ // either Listener or BindAddress/BindPort/BindNetwork is set, + // if Listener is set, use it and omit BindAddress/BindPort/BindNetwork. + Listener net.Listener + + // ListenFunc can be overridden to create a custom listener, e.g. for mocking in tests. + // It defaults to options.CreateListener. + ListenFunc func(network, addr string, config net.ListenConfig) (net.Listener, int, error) +} + +// Validate ensures that the insecure port values within the range of the port. +func (s *DeprecatedInsecureServingOptions) Validate() []error { + if s == nil { + return nil + } + + errors := []error{} + + if s.BindPort < 0 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("insecure port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port", s.BindPort)) + } + + return errors +} + +// AddFlags adds flags related to insecure serving to the specified FlagSet. +func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.IPVar(&s.BindAddress, "insecure-bind-address", s.BindAddress, ""+ + "The IP address on which to serve the --insecure-port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).") + // Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784 + fs.MarkDeprecated("insecure-bind-address", "This flag will be removed in a future version.") + fs.Lookup("insecure-bind-address").Hidden = false + + fs.IntVar(&s.BindPort, "insecure-port", s.BindPort, ""+ + "The port on which to serve unsecured, unauthenticated access.") + // Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784 + fs.MarkDeprecated("insecure-port", "This flag will be removed in a future version.") + fs.Lookup("insecure-port").Hidden = false +} + +// AddUnqualifiedFlags adds flags related to insecure serving without the --insecure prefix to the specified FlagSet. +func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.IPVar(&s.BindAddress, "address", s.BindAddress, + "The IP address on which to serve the insecure --port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).") + fs.MarkDeprecated("address", "see --bind-address instead.") + fs.Lookup("address").Hidden = false + + fs.IntVar(&s.BindPort, "port", s.BindPort, "The port on which to serve unsecured, unauthenticated access. Set to 0 to disable.") + fs.MarkDeprecated("port", "see --secure-port instead.") + fs.Lookup("port").Hidden = false +} + +// ApplyTo adds DeprecatedInsecureServingOptions to the insecureserverinfo and kube-controller manager configuration. +// Note: the double pointer allows to set the *DeprecatedInsecureServingInfo to nil without referencing the struct hosting this pointer. 
+func (s *DeprecatedInsecureServingOptions) ApplyTo(c **server.DeprecatedInsecureServingInfo) error { + if s == nil { + return nil + } + if s.BindPort <= 0 { + return nil + } + + if s.Listener == nil { + var err error + listen := CreateListener + if s.ListenFunc != nil { + listen = s.ListenFunc + } + addr := net.JoinHostPort(s.BindAddress.String(), fmt.Sprintf("%d", s.BindPort)) + s.Listener, s.BindPort, err = listen(s.BindNetwork, addr, net.ListenConfig{}) + if err != nil { + return fmt.Errorf("failed to create listener: %v", err) + } + } + + *c = &server.DeprecatedInsecureServingInfo{ + Listener: s.Listener, + } + + return nil +} + +// WithLoopback adds loopback functionality to the serving options. +func (o *DeprecatedInsecureServingOptions) WithLoopback() *DeprecatedInsecureServingOptionsWithLoopback { + return &DeprecatedInsecureServingOptionsWithLoopback{o} +} + +// DeprecatedInsecureServingOptionsWithLoopback adds loopback functionality to the DeprecatedInsecureServingOptions. +// DEPRECATED: all insecure serving options will be removed in a future version, however note that +// there are security concerns over how health checks can work here - see e.g. #43784 +type DeprecatedInsecureServingOptionsWithLoopback struct { + *DeprecatedInsecureServingOptions +} + +// ApplyTo fills up serving information in the server configuration. +func (s *DeprecatedInsecureServingOptionsWithLoopback) ApplyTo(insecureServingInfo **server.DeprecatedInsecureServingInfo, loopbackClientConfig **rest.Config) error { + if s == nil || s.DeprecatedInsecureServingOptions == nil || insecureServingInfo == nil { + return nil + } + + if err := s.DeprecatedInsecureServingOptions.ApplyTo(insecureServingInfo); err != nil { + return err + } + + if *insecureServingInfo == nil || loopbackClientConfig == nil { + return nil + } + + secureLoopbackClientConfig, err := (*insecureServingInfo).NewLoopbackClientConfig() + switch { + // if we failed and there's no fallback loopback client config, we need to fail + case err != nil && *loopbackClientConfig == nil: + return err + + // if we failed, but we already have a fallback loopback client config (usually insecure), allow it + case err != nil && *loopbackClientConfig != nil: + + default: + *loopbackClientConfig = secureLoopbackClientConfig + } + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/doc.go b/vendor/k8s.io/apiserver/pkg/server/options/doc.go new file mode 100644 index 0000000000..426336be09 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package options is the public flags and options used by a generic api +// server. It takes a minimal set of dependencies and does not reference +// implementations, in order to ensure it may be reused by multiple components +// (such as CLI commands that wish to generate or validate config). 
+package options // import "k8s.io/apiserver/pkg/server/options" diff --git a/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go new file mode 100644 index 0000000000..07999cab11 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/utils/path" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/egressselector" +) + +// EgressSelectorOptions holds the api server egress selector options. +// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190226-network-proxy.md +type EgressSelectorOptions struct { + // ConfigFile is the file path with api-server egress selector configuration. + ConfigFile string +} + +// NewEgressSelectorOptions creates a new instance of EgressSelectorOptions +// +// The option is to point to a configuration file for egress/konnectivity. +// This determines which types of requests use egress/konnectivity and how they use it. +// If empty the API Server will attempt to connect directly using the network. +func NewEgressSelectorOptions() *EgressSelectorOptions { + return &EgressSelectorOptions{} +} + +// AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet +func (o *EgressSelectorOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.ConfigFile, "egress-selector-config-file", o.ConfigFile, + "File with apiserver egress selector configuration.") +} + +// ApplyTo adds the egress selector settings to the server configuration. +// In case egress selector settings were not provided by a cluster-admin +// they will be prepared from the recommended/default/no-op values. +func (o *EgressSelectorOptions) ApplyTo(c *server.Config) error { + if o == nil { + return nil + } + + npConfig, err := egressselector.ReadEgressSelectorConfiguration(o.ConfigFile) + if err != nil { + return fmt.Errorf("failed to read egress selector config: %v", err) + } + errs := egressselector.ValidateEgressSelectorConfiguration(npConfig) + if len(errs) > 0 { + return fmt.Errorf("failed to validate egress selector configuration: %v", errs.ToAggregate()) + } + + cs, err := egressselector.NewEgressSelector(npConfig) + if err != nil { + return fmt.Errorf("failed to setup egress selector with config %#v: %v", npConfig, err) + } + c.EgressSelector = cs + return nil +} + +// Validate verifies flags passed to EgressSelectorOptions. 
+func (o *EgressSelectorOptions) Validate() []error { + if o == nil || o.ConfigFile == "" { + return nil + } + + errs := []error{} + + if exists, err := path.Exists(path.CheckFollowSymlink, o.ConfigFile); !exists || err != nil { + errs = append(errs, fmt.Errorf("egress-selector-config-file %s does not exist", o.ConfigFile)) + } + + return errs +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS new file mode 100644 index 0000000000..71edc3ecdc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-encryption-at-rest-approvers +reviewers: +- sig-auth-encryption-at-rest-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go new file mode 100644 index 0000000000..372ab5eb8a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -0,0 +1,375 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encryptionconfig + +import ( + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + apiserverconfig "k8s.io/apiserver/pkg/apis/config" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/apiserver/pkg/apis/config/validation" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/storage/value" + aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" + "k8s.io/apiserver/pkg/storage/value/encrypt/envelope" + "k8s.io/apiserver/pkg/storage/value/encrypt/identity" + "k8s.io/apiserver/pkg/storage/value/encrypt/secretbox" +) + +const ( + aesCBCTransformerPrefixV1 = "k8s:enc:aescbc:v1:" + aesGCMTransformerPrefixV1 = "k8s:enc:aesgcm:v1:" + secretboxTransformerPrefixV1 = "k8s:enc:secretbox:v1:" + kmsTransformerPrefixV1 = "k8s:enc:kms:v1:" + kmsPluginHealthzNegativeTTL = 3 * time.Second + kmsPluginHealthzPositiveTTL = 20 * time.Second +) + +type kmsPluginHealthzResponse struct { + err error + received time.Time +} + +type kmsPluginProbe struct { + name string + ttl time.Duration + envelope.Service + lastResponse *kmsPluginHealthzResponse + l *sync.Mutex +} + +func (h *kmsPluginProbe) toHealthzCheck(idx int) healthz.HealthChecker { + return healthz.NamedCheck(fmt.Sprintf("kms-provider-%d", idx), func(r *http.Request) error { + return h.Check() + }) +} + +// GetKMSPluginHealthzCheckers extracts KMSPluginProbes from the EncryptionConfig. 
+func GetKMSPluginHealthzCheckers(filepath string) ([]healthz.HealthChecker, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("error opening encryption provider configuration file %q: %v", filepath, err) + } + defer f.Close() + var result []healthz.HealthChecker + probes, err := getKMSPluginProbes(f) + if err != nil { + return nil, err + } + + for i, p := range probes { + probe := p + result = append(result, probe.toHealthzCheck(i)) + } + return result, nil +} + +func getKMSPluginProbes(reader io.Reader) ([]*kmsPluginProbe, error) { + var result []*kmsPluginProbe + + configFileContents, err := ioutil.ReadAll(reader) + if err != nil { + return result, fmt.Errorf("could not read content of encryption provider configuration: %v", err) + } + + config, err := loadConfig(configFileContents) + if err != nil { + return result, fmt.Errorf("error while parsing encrypiton provider configuration: %v", err) + } + + for _, r := range config.Resources { + for _, p := range r.Providers { + if p.KMS != nil { + s, err := envelope.NewGRPCService(p.KMS.Endpoint, p.KMS.Timeout.Duration) + if err != nil { + return nil, fmt.Errorf("could not configure KMS-Plugin's probe %q, error: %v", p.KMS.Name, err) + } + + result = append(result, &kmsPluginProbe{ + name: p.KMS.Name, + ttl: kmsPluginHealthzNegativeTTL, + Service: s, + l: &sync.Mutex{}, + lastResponse: &kmsPluginHealthzResponse{}, + }) + } + } + } + + return result, nil +} + +// Check encrypts and decrypts test data against KMS-Plugin's gRPC endpoint. +func (h *kmsPluginProbe) Check() error { + h.l.Lock() + defer h.l.Unlock() + + if (time.Since(h.lastResponse.received)) < h.ttl { + return h.lastResponse.err + } + + p, err := h.Service.Encrypt([]byte("ping")) + if err != nil { + h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} + h.ttl = kmsPluginHealthzNegativeTTL + return fmt.Errorf("failed to perform encrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + } + + if _, err := h.Service.Decrypt(p); err != nil { + h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} + h.ttl = kmsPluginHealthzNegativeTTL + return fmt.Errorf("failed to perform decrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + } + + h.lastResponse = &kmsPluginHealthzResponse{err: nil, received: time.Now()} + h.ttl = kmsPluginHealthzPositiveTTL + return nil +} + +// GetTransformerOverrides returns the transformer overrides by reading and parsing the encryption provider configuration file +func GetTransformerOverrides(filepath string) (map[schema.GroupResource]value.Transformer, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("error opening encryption provider configuration file %q: %v", filepath, err) + } + defer f.Close() + + result, err := parseEncryptionConfiguration(f) + if err != nil { + return nil, fmt.Errorf("error while parsing encryption provider configuration file %q: %v", filepath, err) + } + return result, nil +} + +func parseEncryptionConfiguration(f io.Reader) (map[schema.GroupResource]value.Transformer, error) { + configFileContents, err := ioutil.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("could not read contents: %v", err) + } + + config, err := loadConfig(configFileContents) + if err != nil { + return nil, fmt.Errorf("error while parsing file: %v", err) + } + + resourceToPrefixTransformer := map[schema.GroupResource][]value.PrefixTransformer{} + + // For each entry in the configuration + for _, 
resourceConfig := range config.Resources { + transformers, err := prefixTransformers(&resourceConfig) + if err != nil { + return nil, err + } + + // For each resource, create a list of providers to use + for _, resource := range resourceConfig.Resources { + gr := schema.ParseGroupResource(resource) + resourceToPrefixTransformer[gr] = append( + resourceToPrefixTransformer[gr], transformers...) + } + } + + result := map[schema.GroupResource]value.Transformer{} + for gr, transList := range resourceToPrefixTransformer { + result[gr] = value.NewMutableTransformer(value.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...)) + } + return result, nil + +} + +func loadConfig(data []byte) (*apiserverconfig.EncryptionConfiguration, error) { + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + apiserverconfig.AddToScheme(scheme) + apiserverconfigv1.AddToScheme(scheme) + + configObj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil) + if err != nil { + return nil, err + } + config, ok := configObj.(*apiserverconfig.EncryptionConfiguration) + if !ok { + return nil, fmt.Errorf("got unexpected config type: %v", gvk) + } + + return config, validation.ValidateEncryptionConfiguration(config).ToAggregate() +} + +// The factory to create kms service. This is to make writing test easier. +var envelopeServiceFactory = envelope.NewGRPCService + +func prefixTransformers(config *apiserverconfig.ResourceConfiguration) ([]value.PrefixTransformer, error) { + var result []value.PrefixTransformer + for _, provider := range config.Providers { + var ( + transformer value.PrefixTransformer + err error + ) + + switch { + case provider.AESGCM != nil: + transformer, err = aesPrefixTransformer(provider.AESGCM, aestransformer.NewGCMTransformer, aesGCMTransformerPrefixV1) + case provider.AESCBC != nil: + transformer, err = aesPrefixTransformer(provider.AESCBC, aestransformer.NewCBCTransformer, aesCBCTransformerPrefixV1) + case provider.Secretbox != nil: + transformer, err = secretboxPrefixTransformer(provider.Secretbox) + case provider.KMS != nil: + envelopeService, err := envelopeServiceFactory(provider.KMS.Endpoint, provider.KMS.Timeout.Duration) + if err != nil { + return nil, fmt.Errorf("could not configure KMS plugin %q, error: %v", provider.KMS.Name, err) + } + + transformer, err = envelopePrefixTransformer(provider.KMS, envelopeService, kmsTransformerPrefixV1) + case provider.Identity != nil: + transformer = value.PrefixTransformer{ + Transformer: identity.NewEncryptCheckTransformer(), + Prefix: []byte{}, + } + default: + return nil, errors.New("provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity") + } + + if err != nil { + return result, err + } + result = append(result, transformer) + } + return result, nil +} + +type blockTransformerFunc func(cipher.Block) value.Transformer + +func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTransformerFunc, prefix string) (value.PrefixTransformer, error) { + var result value.PrefixTransformer + + if len(config.Keys) == 0 { + return result, fmt.Errorf("aes provider has no valid keys") + } + for _, key := range config.Keys { + if key.Name == "" { + return result, fmt.Errorf("key with invalid name provided") + } + if key.Secret == "" { + return result, fmt.Errorf("key %v has no provided secret", key.Name) + } + } + + keyTransformers := []value.PrefixTransformer{} + + for _, keyData := range config.Keys { + key, err := 
base64.StdEncoding.DecodeString(keyData.Secret) + if err != nil { + return result, fmt.Errorf("could not obtain secret for named key %s: %s", keyData.Name, err) + } + block, err := aes.NewCipher(key) + if err != nil { + return result, fmt.Errorf("error while creating cipher for named key %s: %s", keyData.Name, err) + } + + // Create a new PrefixTransformer for this key + keyTransformers = append(keyTransformers, + value.PrefixTransformer{ + Transformer: fn(block), + Prefix: []byte(keyData.Name + ":"), + }) + } + + // Create a prefixTransformer which can choose between these keys + keyTransformer := value.NewPrefixTransformers( + fmt.Errorf("no matching key was found for the provided AES transformer"), keyTransformers...) + + // Create a PrefixTransformer which shall later be put in a list with other providers + result = value.PrefixTransformer{ + Transformer: keyTransformer, + Prefix: []byte(prefix), + } + return result, nil +} + +func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) (value.PrefixTransformer, error) { + var result value.PrefixTransformer + + if len(config.Keys) == 0 { + return result, fmt.Errorf("secretbox provider has no valid keys") + } + for _, key := range config.Keys { + if key.Name == "" { + return result, fmt.Errorf("key with invalid name provided") + } + if key.Secret == "" { + return result, fmt.Errorf("key %v has no provided secret", key.Name) + } + } + + keyTransformers := []value.PrefixTransformer{} + + for _, keyData := range config.Keys { + key, err := base64.StdEncoding.DecodeString(keyData.Secret) + if err != nil { + return result, fmt.Errorf("could not obtain secret for named key %s: %s", keyData.Name, err) + } + + if len(key) != 32 { + return result, fmt.Errorf("expected key size 32 for secretbox provider, got %v", len(key)) + } + + keyArray := [32]byte{} + copy(keyArray[:], key) + + // Create a new PrefixTransformer for this key + keyTransformers = append(keyTransformers, + value.PrefixTransformer{ + Transformer: secretbox.NewSecretboxTransformer(keyArray), + Prefix: []byte(keyData.Name + ":"), + }) + } + + // Create a prefixTransformer which can choose between these keys + keyTransformer := value.NewPrefixTransformers( + fmt.Errorf("no matching key was found for the provided Secretbox transformer"), keyTransformers...) + + // Create a PrefixTransformer which shall later be put in a list with other providers + result = value.PrefixTransformer{ + Transformer: keyTransformer, + Prefix: []byte(secretboxTransformerPrefixV1), + } + return result, nil +} + +func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelopeService envelope.Service, prefix string) (value.PrefixTransformer, error) { + envelopeTransformer, err := envelope.NewEnvelopeTransformer(envelopeService, int(*config.CacheSize), aestransformer.NewCBCTransformer) + if err != nil { + return value.PrefixTransformer{}, err + } + return value.PrefixTransformer{ + Transformer: envelopeTransformer, + Prefix: []byte(prefix + config.Name + ":"), + }, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go new file mode 100644 index 0000000000..d9c6f9e54b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -0,0 +1,329 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/registry/generic" + genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/server/options/encryptionconfig" + serverstorage "k8s.io/apiserver/pkg/server/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" + storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/klog/v2" +) + +type EtcdOptions struct { + // The value of Paging on StorageConfig will be overridden by the + // calculated feature gate value. + StorageConfig storagebackend.Config + EncryptionProviderConfigFilepath string + + EtcdServersOverrides []string + + // To enable protobuf as storage format, it is enough + // to set it to "application/vnd.kubernetes.protobuf". + DefaultStorageMediaType string + DeleteCollectionWorkers int + EnableGarbageCollection bool + + // Set EnableWatchCache to false to disable all watch caches + EnableWatchCache bool + // Set DefaultWatchCacheSize to zero to disable watch caches for those resources that have no explicit cache size set + DefaultWatchCacheSize int + // WatchCacheSizes represents override to a given resource + WatchCacheSizes []string +} + +var storageTypes = sets.NewString( + storagebackend.StorageTypeETCD3, +) + +func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { + options := &EtcdOptions{ + StorageConfig: *backendConfig, + DefaultStorageMediaType: "application/json", + DeleteCollectionWorkers: 1, + EnableGarbageCollection: true, + EnableWatchCache: true, + DefaultWatchCacheSize: 100, + } + options.StorageConfig.CountMetricPollPeriod = time.Minute + return options +} + +func (s *EtcdOptions) Validate() []error { + if s == nil { + return nil + } + + allErrors := []error{} + if len(s.StorageConfig.Transport.ServerList) == 0 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers must be specified")) + } + + if s.StorageConfig.Type != storagebackend.StorageTypeUnset && !storageTypes.Has(s.StorageConfig.Type) { + allErrors = append(allErrors, fmt.Errorf("--storage-backend invalid, allowed values: %s. 
If not specified, it will default to 'etcd3'", strings.Join(storageTypes.List(), ", "))) + } + + for _, override := range s.EtcdServersOverrides { + tokens := strings.Split(override, "#") + if len(tokens) != 2 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers-overrides invalid, must be of format: group/resource#servers, where servers are URLs, semicolon separated")) + continue + } + + apiresource := strings.Split(tokens[0], "/") + if len(apiresource) != 2 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers-overrides invalid, must be of format: group/resource#servers, where servers are URLs, semicolon separated")) + continue + } + + } + + return allErrors +} + +// AddEtcdFlags adds flags related to etcd storage for a specific APIServer to the specified FlagSet +func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.StringSliceVar(&s.EtcdServersOverrides, "etcd-servers-overrides", s.EtcdServersOverrides, ""+ + "Per-resource etcd servers overrides, comma separated. The individual override "+ + "format: group/resource#servers, where servers are URLs, semicolon separated.") + + fs.StringVar(&s.DefaultStorageMediaType, "storage-media-type", s.DefaultStorageMediaType, ""+ + "The media type to use to store objects in storage. "+ + "Some resources or storage backends may only support a specific media type and will ignore this setting.") + fs.IntVar(&s.DeleteCollectionWorkers, "delete-collection-workers", s.DeleteCollectionWorkers, + "Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.") + + fs.BoolVar(&s.EnableGarbageCollection, "enable-garbage-collector", s.EnableGarbageCollection, ""+ + "Enables the generic garbage collector. MUST be synced with the corresponding flag "+ + "of the kube-controller-manager.") + + fs.BoolVar(&s.EnableWatchCache, "watch-cache", s.EnableWatchCache, + "Enable watch caching in the apiserver") + + fs.IntVar(&s.DefaultWatchCacheSize, "default-watch-cache-size", s.DefaultWatchCacheSize, + "Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.") + + fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ + "Watch cache size settings for some resources (pods, nodes, etc.), comma separated. "+ + "The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), "+ + "group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, "+ + "and size is a number. It takes effect when watch-cache is enabled. "+ + "Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) "+ + "have system defaults set by heuristics, others default to default-watch-cache-size") + + fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, + "The storage backend for persistence. 
Options: 'etcd3' (default).") + + dummyCacheSize := 0 + fs.IntVar(&dummyCacheSize, "deserialization-cache-size", 0, "Number of deserialized json objects to cache in memory.") + fs.MarkDeprecated("deserialization-cache-size", "the deserialization cache was dropped in 1.13 with support for etcd2") + + fs.StringSliceVar(&s.StorageConfig.Transport.ServerList, "etcd-servers", s.StorageConfig.Transport.ServerList, + "List of etcd servers to connect with (scheme://ip:port), comma separated.") + + fs.StringVar(&s.StorageConfig.Prefix, "etcd-prefix", s.StorageConfig.Prefix, + "The prefix to prepend to all resource paths in etcd.") + + fs.StringVar(&s.StorageConfig.Transport.KeyFile, "etcd-keyfile", s.StorageConfig.Transport.KeyFile, + "SSL key file used to secure etcd communication.") + + fs.StringVar(&s.StorageConfig.Transport.CertFile, "etcd-certfile", s.StorageConfig.Transport.CertFile, + "SSL certification file used to secure etcd communication.") + + fs.StringVar(&s.StorageConfig.Transport.TrustedCAFile, "etcd-cafile", s.StorageConfig.Transport.TrustedCAFile, + "SSL Certificate Authority file used to secure etcd communication.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "experimental-encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") + fs.MarkDeprecated("experimental-encryption-provider-config", "use --encryption-provider-config.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") + + fs.DurationVar(&s.StorageConfig.CompactionInterval, "etcd-compaction-interval", s.StorageConfig.CompactionInterval, + "The interval of compaction requests. If 0, the compaction request from apiserver is disabled.") + + fs.DurationVar(&s.StorageConfig.CountMetricPollPeriod, "etcd-count-metric-poll-period", s.StorageConfig.CountMetricPollPeriod, ""+ + "Frequency of polling etcd for number of resources per type. 0 disables the metric collection.") + + fs.DurationVar(&s.StorageConfig.DBMetricPollInterval, "etcd-db-metric-poll-interval", s.StorageConfig.DBMetricPollInterval, + "The interval of requests to poll etcd and update metric. 
0 disables the metric collection") + + fs.DurationVar(&s.StorageConfig.HealthcheckTimeout, "etcd-healthcheck-timeout", s.StorageConfig.HealthcheckTimeout, + "The timeout to use when checking etcd health.") +} + +func (s *EtcdOptions) ApplyTo(c *server.Config) error { + if s == nil { + return nil + } + if err := s.addEtcdHealthEndpoint(c); err != nil { + return err + } + c.RESTOptionsGetter = &SimpleRestOptionsFactory{Options: *s} + return nil +} + +func (s *EtcdOptions) ApplyWithStorageFactoryTo(factory serverstorage.StorageFactory, c *server.Config) error { + if err := s.addEtcdHealthEndpoint(c); err != nil { + return err + } + c.RESTOptionsGetter = &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory} + return nil +} + +func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { + healthCheck, err := storagefactory.CreateHealthCheck(s.StorageConfig) + if err != nil { + return err + } + c.AddHealthChecks(healthz.NamedCheck("etcd", func(r *http.Request) error { + return healthCheck() + })) + + if s.EncryptionProviderConfigFilepath != "" { + kmsPluginHealthzChecks, err := encryptionconfig.GetKMSPluginHealthzCheckers(s.EncryptionProviderConfigFilepath) + if err != nil { + return err + } + c.AddHealthChecks(kmsPluginHealthzChecks...) + } + + return nil +} + +type SimpleRestOptionsFactory struct { + Options EtcdOptions +} + +func (f *SimpleRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { + ret := generic.RESTOptions{ + StorageConfig: &f.Options.StorageConfig, + Decorator: generic.UndecoratedStorage, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + ResourcePrefix: resource.Group + "/" + resource.Resource, + CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + } + if f.Options.EnableWatchCache { + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + size, ok := sizes[resource] + if ok && size > 0 { + klog.Warningf("Dropping watch-cache-size for %v - watchCache size is now dynamic", resource) + } + if ok && size <= 0 { + ret.Decorator = generic.UndecoratedStorage + } else { + ret.Decorator = genericregistry.StorageWithCacher() + } + } + return ret, nil +} + +type StorageFactoryRestOptionsFactory struct { + Options EtcdOptions + StorageFactory serverstorage.StorageFactory +} + +func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { + storageConfig, err := f.StorageFactory.NewConfig(resource) + if err != nil { + return generic.RESTOptions{}, fmt.Errorf("unable to find storage destination for %v, due to %v", resource, err.Error()) + } + + ret := generic.RESTOptions{ + StorageConfig: storageConfig, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + ResourcePrefix: f.StorageFactory.ResourcePrefix(resource), + CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + } + if f.Options.EnableWatchCache { + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + size, ok := sizes[resource] + if ok && size > 0 { + klog.Warningf("Dropping watch-cache-size for %v - watchCache size is now dynamic", resource) + } + if ok && size <= 0 { + ret.Decorator = generic.UndecoratedStorage + } else { + ret.Decorator = 
genericregistry.StorageWithCacher() + } + } + + return ret, nil +} + +// ParseWatchCacheSizes turns a list of cache size values into a map of group resources +// to requested sizes. +func ParseWatchCacheSizes(cacheSizes []string) (map[schema.GroupResource]int, error) { + watchCacheSizes := make(map[schema.GroupResource]int) + for _, c := range cacheSizes { + tokens := strings.Split(c, "#") + if len(tokens) != 2 { + return nil, fmt.Errorf("invalid value of watch cache size: %s", c) + } + + size, err := strconv.Atoi(tokens[1]) + if err != nil { + return nil, fmt.Errorf("invalid size of watch cache size: %s", c) + } + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative: %s", c) + } + watchCacheSizes[schema.ParseGroupResource(tokens[0])] = size + } + return watchCacheSizes, nil +} + +// WriteWatchCacheSizes turns a map of cache size values into a list of string specifications. +func WriteWatchCacheSizes(watchCacheSizes map[schema.GroupResource]int) ([]string, error) { + var cacheSizes []string + + for resource, size := range watchCacheSizes { + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative for resource %s", resource) + } + cacheSizes = append(cacheSizes, fmt.Sprintf("%s#%d", resource.String(), size)) + } + return cacheSizes, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/feature.go b/vendor/k8s.io/apiserver/pkg/server/options/feature.go new file mode 100644 index 0000000000..235635ea9b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/feature.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apiserver/pkg/server" +) + +type FeatureOptions struct { + EnableProfiling bool + EnableContentionProfiling bool +} + +func NewFeatureOptions() *FeatureOptions { + defaults := server.NewConfig(serializer.CodecFactory{}) + + return &FeatureOptions{ + EnableProfiling: defaults.EnableProfiling, + EnableContentionProfiling: defaults.EnableContentionProfiling, + } +} + +func (o *FeatureOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.BoolVar(&o.EnableProfiling, "profiling", o.EnableProfiling, + "Enable profiling via web interface host:port/debug/pprof/") + fs.BoolVar(&o.EnableContentionProfiling, "contention-profiling", o.EnableContentionProfiling, + "Enable lock contention profiling, if profiling is enabled") + dummy := false + fs.BoolVar(&dummy, "enable-swagger-ui", dummy, "Enables swagger ui on the apiserver at /swagger-ui") + fs.MarkDeprecated("enable-swagger-ui", "swagger 1.2 support has been removed") +} + +func (o *FeatureOptions) ApplyTo(c *server.Config) error { + if o == nil { + return nil + } + + c.EnableProfiling = o.EnableProfiling + c.EnableContentionProfiling = o.EnableContentionProfiling + + return nil +} + +func (o *FeatureOptions) Validate() []error { + if o == nil { + return nil + } + + errs := []error{} + return errs +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go new file mode 100644 index 0000000000..18824083e2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" +) + +// RecommendedOptions contains the recommended options for running an API server. +// If you add something to this list, it should be in a logical grouping. +// Each of them can be nil to leave the feature unconfigured on ApplyTo. +type RecommendedOptions struct { + Etcd *EtcdOptions + SecureServing *SecureServingOptionsWithLoopback + Authentication *DelegatingAuthenticationOptions + Authorization *DelegatingAuthorizationOptions + Audit *AuditOptions + Features *FeatureOptions + CoreAPI *CoreAPIOptions + + // FeatureGate is a way to plumb feature gate through if you have them. + FeatureGate featuregate.FeatureGate + // ExtraAdmissionInitializers is called once after all ApplyTo from the options above, to pass the returned + // admission plugin initializers to Admission.ApplyTo. 
+ ExtraAdmissionInitializers func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) + Admission *AdmissionOptions + // API Server Egress Selector is used to control outbound traffic from the API Server + EgressSelector *EgressSelectorOptions +} + +func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { + sso := NewSecureServingOptions() + + // We are composing recommended options for an aggregated api-server, + // whose client is typically a proxy multiplexing many operations --- + // notably including long-running ones --- into one HTTP/2 connection + // into this server. So allow many concurrent operations. + sso.HTTP2MaxStreamsPerConnection = 1000 + + return &RecommendedOptions{ + Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), + SecureServing: sso.WithLoopback(), + Authentication: NewDelegatingAuthenticationOptions(), + Authorization: NewDelegatingAuthorizationOptions(), + Audit: NewAuditOptions(), + Features: NewFeatureOptions(), + CoreAPI: NewCoreAPIOptions(), + // Wired a global by default that sadly people will abuse to have different meanings in different repos. + // Please consider creating your own FeatureGate so you can have a consistent meaning for what a variable contains + // across different repos. Future you will thank you. + FeatureGate: feature.DefaultFeatureGate, + ExtraAdmissionInitializers: func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) { return nil, nil }, + Admission: NewAdmissionOptions(), + EgressSelector: NewEgressSelectorOptions(), + } +} + +func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) { + o.Etcd.AddFlags(fs) + o.SecureServing.AddFlags(fs) + o.Authentication.AddFlags(fs) + o.Authorization.AddFlags(fs) + o.Audit.AddFlags(fs) + o.Features.AddFlags(fs) + o.CoreAPI.AddFlags(fs) + o.Admission.AddFlags(fs) + o.EgressSelector.AddFlags(fs) +} + +// ApplyTo adds RecommendedOptions to the server configuration. +// pluginInitializers can be empty, it is only need for additional initializers. 
+func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { + if err := o.Etcd.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.SecureServing.ApplyTo(&config.Config.SecureServing, &config.Config.LoopbackClientConfig); err != nil { + return err + } + if err := o.Authentication.ApplyTo(&config.Config.Authentication, config.SecureServing, config.OpenAPIConfig); err != nil { + return err + } + if err := o.Authorization.ApplyTo(&config.Config.Authorization); err != nil { + return err + } + if err := o.Audit.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.Features.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.CoreAPI.ApplyTo(config); err != nil { + return err + } + if initializers, err := o.ExtraAdmissionInitializers(config); err != nil { + return err + } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, o.FeatureGate, initializers...); err != nil { + return err + } + if err := o.EgressSelector.ApplyTo(&config.Config); err != nil { + return err + } + if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { + config.FlowControl = utilflowcontrol.New( + config.SharedInformerFactory, + kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta1(), + config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, + config.RequestTimeout/4, + ) + } + return nil +} + +func (o *RecommendedOptions) Validate() []error { + errors := []error{} + errors = append(errors, o.Etcd.Validate()...) + errors = append(errors, o.SecureServing.Validate()...) + errors = append(errors, o.Authentication.Validate()...) + errors = append(errors, o.Authorization.Validate()...) + errors = append(errors, o.Audit.Validate()...) + errors = append(errors, o.Features.Validate()...) + errors = append(errors, o.CoreAPI.Validate()...) + errors = append(errors, o.Admission.Validate()...) + errors = append(errors, o.EgressSelector.Validate()...) + + return errors +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go new file mode 100644 index 0000000000..1ee7060428 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -0,0 +1,215 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apiserver/pkg/server" + utilfeature "k8s.io/apiserver/pkg/util/feature" + + "github.com/spf13/pflag" +) + +// ServerRunOptions contains the options while running a generic api server. 
+type ServerRunOptions struct { + AdvertiseAddress net.IP + + CorsAllowedOriginList []string + ExternalHost string + MaxRequestsInFlight int + MaxMutatingRequestsInFlight int + RequestTimeout time.Duration + GoawayChance float64 + LivezGracePeriod time.Duration + MinRequestTimeout int + ShutdownDelayDuration time.Duration + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. + JSONPatchMaxCopyBytes int64 + // The limit on the request body size that would be accepted and + // decoded in a write request. 0 means no limit. + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. + MaxRequestBodyBytes int64 + EnablePriorityAndFairness bool +} + +func NewServerRunOptions() *ServerRunOptions { + defaults := server.NewConfig(serializer.CodecFactory{}) + return &ServerRunOptions{ + MaxRequestsInFlight: defaults.MaxRequestsInFlight, + MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight, + RequestTimeout: defaults.RequestTimeout, + LivezGracePeriod: defaults.LivezGracePeriod, + MinRequestTimeout: defaults.MinRequestTimeout, + ShutdownDelayDuration: defaults.ShutdownDelayDuration, + JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, + MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, + EnablePriorityAndFairness: true, + } +} + +// ApplyTo applies the run options to the method receiver and returns self +func (s *ServerRunOptions) ApplyTo(c *server.Config) error { + c.CorsAllowedOriginList = s.CorsAllowedOriginList + c.ExternalAddress = s.ExternalHost + c.MaxRequestsInFlight = s.MaxRequestsInFlight + c.MaxMutatingRequestsInFlight = s.MaxMutatingRequestsInFlight + c.LivezGracePeriod = s.LivezGracePeriod + c.RequestTimeout = s.RequestTimeout + c.GoawayChance = s.GoawayChance + c.MinRequestTimeout = s.MinRequestTimeout + c.ShutdownDelayDuration = s.ShutdownDelayDuration + c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes + c.MaxRequestBodyBytes = s.MaxRequestBodyBytes + c.PublicAddress = s.AdvertiseAddress + + return nil +} + +// DefaultAdvertiseAddress sets the field AdvertiseAddress if unset. The field will be set based on the SecureServingOptions. +func (s *ServerRunOptions) DefaultAdvertiseAddress(secure *SecureServingOptions) error { + if secure == nil { + return nil + } + + if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() { + hostIP, err := secure.DefaultExternalAddress() + if err != nil { + return fmt.Errorf("Unable to find suitable network address.error='%v'. 
"+ + "Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err) + } + s.AdvertiseAddress = hostIP + } + + return nil +} + +// Validate checks validation of ServerRunOptions +func (s *ServerRunOptions) Validate() []error { + errors := []error{} + + if s.LivezGracePeriod < 0 { + errors = append(errors, fmt.Errorf("--livez-grace-period can not be a negative value")) + } + + if s.MaxRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-requests-inflight can not be negative value")) + } + if s.MaxMutatingRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight can not be negative value")) + } + + if s.RequestTimeout.Nanoseconds() < 0 { + errors = append(errors, fmt.Errorf("--request-timeout can not be negative value")) + } + + if s.GoawayChance < 0 || s.GoawayChance > 0.02 { + errors = append(errors, fmt.Errorf("--goaway-chance can not be less than 0 or greater than 0.02")) + } + + if s.MinRequestTimeout < 0 { + errors = append(errors, fmt.Errorf("--min-request-timeout can not be negative value")) + } + + if s.ShutdownDelayDuration < 0 { + errors = append(errors, fmt.Errorf("--shutdown-delay-duration can not be negative value")) + } + + if s.JSONPatchMaxCopyBytes < 0 { + errors = append(errors, fmt.Errorf("--json-patch-max-copy-bytes can not be negative value")) + } + + if s.MaxRequestBodyBytes < 0 { + errors = append(errors, fmt.Errorf("--max-resource-write-bytes can not be negative value")) + } + + return errors +} + +// AddUniversalFlags adds flags for a specific APIServer to the specified FlagSet +func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { + // Note: the weird ""+ in below lines seems to be the only way to get gofmt to + // arrange these text blocks sensibly. Grrr. + + fs.IPVar(&s.AdvertiseAddress, "advertise-address", s.AdvertiseAddress, ""+ + "The IP address on which to advertise the apiserver to members of the cluster. This "+ + "address must be reachable by the rest of the cluster. If blank, the --bind-address "+ + "will be used. If --bind-address is unspecified, the host's default interface will "+ + "be used.") + + fs.StringSliceVar(&s.CorsAllowedOriginList, "cors-allowed-origins", s.CorsAllowedOriginList, ""+ + "List of allowed origins for CORS, comma separated. An allowed origin can be a regular "+ + "expression to support subdomain matching. If this list is empty CORS will not be enabled.") + + deprecatedTargetRAMMB := 0 + fs.IntVar(&deprecatedTargetRAMMB, "target-ram-mb", deprecatedTargetRAMMB, + "DEPRECATED: Memory limit for apiserver in MB (used to configure sizes of caches, etc.)") + fs.MarkDeprecated("target-ram-mb", "This flag will be removed in v1.23") + + fs.StringVar(&s.ExternalHost, "external-hostname", s.ExternalHost, + "The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery).") + + deprecatedMasterServiceNamespace := metav1.NamespaceDefault + fs.StringVar(&deprecatedMasterServiceNamespace, "master-service-namespace", deprecatedMasterServiceNamespace, ""+ + "DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods.") + + fs.IntVar(&s.MaxRequestsInFlight, "max-requests-inflight", s.MaxRequestsInFlight, ""+ + "The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, "+ + "it rejects requests. 
Zero for no limit.") + + fs.IntVar(&s.MaxMutatingRequestsInFlight, "max-mutating-requests-inflight", s.MaxMutatingRequestsInFlight, ""+ + "The maximum number of mutating requests in flight at a given time. When the server exceeds this, "+ + "it rejects requests. Zero for no limit.") + + fs.DurationVar(&s.RequestTimeout, "request-timeout", s.RequestTimeout, ""+ + "An optional field indicating the duration a handler must keep a request open before timing "+ + "it out. This is the default request timeout for requests but may be overridden by flags such as "+ + "--min-request-timeout for specific types of requests.") + + fs.Float64Var(&s.GoawayChance, "goaway-chance", s.GoawayChance, ""+ + "To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). "+ + "The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. "+ + "This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. "+ + "Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point.") + + fs.DurationVar(&s.LivezGracePeriod, "livez-grace-period", s.LivezGracePeriod, ""+ + "This option represents the maximum amount of time it should take for apiserver to complete its startup sequence "+ + "and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume "+ + "that unfinished post-start hooks will complete successfully and therefore return true.") + + fs.IntVar(&s.MinRequestTimeout, "min-request-timeout", s.MinRequestTimeout, ""+ + "An optional field indicating the minimum number of seconds a handler must keep "+ + "a request open before timing it out. Currently only honored by the watch request "+ + "handler, which picks a randomized value above this number as the connection timeout, "+ + "to spread out load.") + + fs.BoolVar(&s.EnablePriorityAndFairness, "enable-priority-and-fairness", s.EnablePriorityAndFairness, ""+ + "If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") + + fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+ + "Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez "+ + "will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+ + "has elapsed. This can be used to allow load balancer to stop sending traffic to this server.") + + utilfeature.DefaultMutableFeatureGate.AddFlag(fs) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go new file mode 100644 index 0000000000..0dcbbb738f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -0,0 +1,356 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "context" + "fmt" + "net" + "path" + "strconv" + "strings" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + cliflag "k8s.io/component-base/cli/flag" +) + +type SecureServingOptions struct { + BindAddress net.IP + // BindPort is ignored when Listener is set, will serve https even with 0. + BindPort int + // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", + // "tcp4", and "tcp6". + BindNetwork string + // Required set to true means that BindPort cannot be zero. + Required bool + // ExternalAddress is the address advertised, even if BindAddress is a loopback. By default this + // is set to BindAddress if the later no loopback, or to the first host interface address. + ExternalAddress net.IP + + // Listener is the secure server network listener. + // either Listener or BindAddress/BindPort/BindNetwork is set, + // if Listener is set, use it and omit BindAddress/BindPort/BindNetwork. + Listener net.Listener + + // ServerCert is the TLS cert info for serving secure traffic + ServerCert GeneratableKeyCert + // SNICertKeys are named CertKeys for serving secure traffic with SNI support. + SNICertKeys []cliflag.NamedCertKey + // CipherSuites is the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + CipherSuites []string + // MinTLSVersion is the minimum TLS version supported. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + MinTLSVersion string + + // HTTP2MaxStreamsPerConnection is the limit that the api server imposes on each client. + // A value of zero means to use the default provided by golang's HTTP/2 support. + HTTP2MaxStreamsPerConnection int + + // PermitPortSharing controls if SO_REUSEPORT is used when binding the port, which allows + // more than one instance to bind on the same address and port. + PermitPortSharing bool +} + +type CertKey struct { + // CertFile is a file containing a PEM-encoded certificate, and possibly the complete certificate chain + CertFile string + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string +} + +type GeneratableKeyCert struct { + // CertKey allows setting an explicit cert/key file to use. + CertKey CertKey + + // CertDirectory specifies a directory to write generated certificates to if CertFile/KeyFile aren't explicitly set. + // PairName is used to determine the filenames within CertDirectory. + // If CertDirectory and PairName are not set, an in-memory certificate will be generated. + CertDirectory string + // PairName is the name which will be used with CertDirectory to make a cert and key filenames. 
+ // It becomes CertDirectory/PairName.crt and CertDirectory/PairName.key + PairName string + + // GeneratedCert holds an in-memory generated certificate if CertFile/KeyFile aren't explicitly set, and CertDirectory/PairName are not set. + GeneratedCert dynamiccertificates.CertKeyContentProvider + + // FixtureDirectory is a directory that contains test fixture used to avoid regeneration of certs during tests. + // The format is: + // _-_-.crt + // _-_-.key + FixtureDirectory string +} + +func NewSecureServingOptions() *SecureServingOptions { + return &SecureServingOptions{ + BindAddress: net.ParseIP("0.0.0.0"), + BindPort: 443, + ServerCert: GeneratableKeyCert{ + PairName: "apiserver", + CertDirectory: "apiserver.local.config/certificates", + }, + } +} + +func (s *SecureServingOptions) DefaultExternalAddress() (net.IP, error) { + if s.ExternalAddress != nil && !s.ExternalAddress.IsUnspecified() { + return s.ExternalAddress, nil + } + return utilnet.ResolveBindAddress(s.BindAddress) +} + +func (s *SecureServingOptions) Validate() []error { + if s == nil { + return nil + } + + errors := []error{} + + if s.Required && s.BindPort < 1 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("--secure-port %v must be between 1 and 65535, inclusive. It cannot be turned off with 0", s.BindPort)) + } else if s.BindPort < 0 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("--secure-port %v must be between 0 and 65535, inclusive. 0 for turning off secure port", s.BindPort)) + } + + if (len(s.ServerCert.CertKey.CertFile) != 0 || len(s.ServerCert.CertKey.KeyFile) != 0) && s.ServerCert.GeneratedCert != nil { + errors = append(errors, fmt.Errorf("cert/key file and in-memory certificate cannot both be set")) + } + + return errors +} + +func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.IPVar(&s.BindAddress, "bind-address", s.BindAddress, ""+ + "The IP address on which to listen for the --secure-port port. The "+ + "associated interface(s) must be reachable by the rest of the cluster, and by CLI/web "+ + "clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.") + + desc := "The port on which to serve HTTPS with authentication and authorization." + if s.Required { + desc += " It cannot be switched off with 0." + } else { + desc += " If 0, don't serve HTTPS at all." + } + fs.IntVar(&s.BindPort, "secure-port", s.BindPort, desc) + + fs.StringVar(&s.ServerCert.CertDirectory, "cert-dir", s.ServerCert.CertDirectory, ""+ + "The directory where the TLS certs are located. "+ + "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") + + fs.StringVar(&s.ServerCert.CertKey.CertFile, "tls-cert-file", s.ServerCert.CertKey.CertFile, ""+ + "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated "+ + "after server cert). 
If HTTPS serving is enabled, and --tls-cert-file and "+ + "--tls-private-key-file are not provided, a self-signed certificate and key "+ + "are generated for the public address and saved to the directory specified by --cert-dir.") + + fs.StringVar(&s.ServerCert.CertKey.KeyFile, "tls-private-key-file", s.ServerCert.CertKey.KeyFile, + "File containing the default x509 private key matching --tls-cert-file.") + + tlsCipherPreferredValues := cliflag.PreferredTLSCipherNames() + tlsCipherInsecureValues := cliflag.InsecureTLSCipherNames() + fs.StringSliceVar(&s.CipherSuites, "tls-cipher-suites", s.CipherSuites, + "Comma-separated list of cipher suites for the server. "+ + "If omitted, the default Go cipher suites will be used. \n"+ + "Preferred values: "+strings.Join(tlsCipherPreferredValues, ", ")+". \n"+ + "Insecure values: "+strings.Join(tlsCipherInsecureValues, ", ")+".") + + tlsPossibleVersions := cliflag.TLSPossibleVersions() + fs.StringVar(&s.MinTLSVersion, "tls-min-version", s.MinTLSVersion, + "Minimum TLS version supported. "+ + "Possible values: "+strings.Join(tlsPossibleVersions, ", ")) + + fs.Var(cliflag.NewNamedCertKeyArray(&s.SNICertKeys), "tls-sni-cert-key", ""+ + "A pair of x509 certificate and private key file paths, optionally suffixed with a list of "+ + "domain patterns which are fully qualified domain names, possibly with prefixed wildcard "+ + "segments. The domain patterns also allow IP addresses, but IPs should only be used if "+ + "the apiserver has visibility to the IP address requested by a client. "+ + "If no domain patterns are provided, the names of the certificate are "+ + "extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns "+ + "trump over extracted names. For multiple key/certificate pairs, use the "+ + "--tls-sni-cert-key multiple times. "+ + "Examples: \"example.crt,example.key\" or \"foo.crt,foo.key:*.foo.com,foo.com\".") + + fs.IntVar(&s.HTTP2MaxStreamsPerConnection, "http2-max-streams-per-connection", s.HTTP2MaxStreamsPerConnection, ""+ + "The limit that the server gives to clients for "+ + "the maximum number of streams in an HTTP/2 connection. "+ + "Zero means to use golang's default.") + + fs.BoolVar(&s.PermitPortSharing, "permit-port-sharing", s.PermitPortSharing, + "If true, SO_REUSEPORT will be used when binding the port, which allows "+ + "more than one instance to bind on the same address and port. [default=false]") +} + +// ApplyTo fills up serving information in the server configuration. 
+func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error { + if s == nil { + return nil + } + if s.BindPort <= 0 && s.Listener == nil { + return nil + } + + if s.Listener == nil { + var err error + addr := net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.BindPort)) + + c := net.ListenConfig{} + + if s.PermitPortSharing { + c.Control = permitPortReuse + } + + s.Listener, s.BindPort, err = CreateListener(s.BindNetwork, addr, c) + if err != nil { + return fmt.Errorf("failed to create listener: %v", err) + } + } else { + if _, ok := s.Listener.Addr().(*net.TCPAddr); !ok { + return fmt.Errorf("failed to parse ip and port from listener") + } + s.BindPort = s.Listener.Addr().(*net.TCPAddr).Port + s.BindAddress = s.Listener.Addr().(*net.TCPAddr).IP + } + + *config = &server.SecureServingInfo{ + Listener: s.Listener, + HTTP2MaxStreamsPerConnection: s.HTTP2MaxStreamsPerConnection, + } + c := *config + + serverCertFile, serverKeyFile := s.ServerCert.CertKey.CertFile, s.ServerCert.CertKey.KeyFile + // load main cert + if len(serverCertFile) != 0 || len(serverKeyFile) != 0 { + var err error + c.Cert, err = dynamiccertificates.NewDynamicServingContentFromFiles("serving-cert", serverCertFile, serverKeyFile) + if err != nil { + return err + } + } else if s.ServerCert.GeneratedCert != nil { + c.Cert = s.ServerCert.GeneratedCert + } + + if len(s.CipherSuites) != 0 { + cipherSuites, err := cliflag.TLSCipherSuites(s.CipherSuites) + if err != nil { + return err + } + c.CipherSuites = cipherSuites + } + + var err error + c.MinTLSVersion, err = cliflag.TLSVersion(s.MinTLSVersion) + if err != nil { + return err + } + + // load SNI certs + namedTLSCerts := make([]dynamiccertificates.SNICertKeyContentProvider, 0, len(s.SNICertKeys)) + for _, nck := range s.SNICertKeys { + tlsCert, err := dynamiccertificates.NewDynamicSNIContentFromFiles("sni-serving-cert", nck.CertFile, nck.KeyFile, nck.Names...) 
+ namedTLSCerts = append(namedTLSCerts, tlsCert) + if err != nil { + return fmt.Errorf("failed to load SNI cert and key: %v", err) + } + } + c.SNICerts = namedTLSCerts + + return nil +} + +func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress string, alternateDNS []string, alternateIPs []net.IP) error { + if s == nil || (s.BindPort == 0 && s.Listener == nil) { + return nil + } + keyCert := &s.ServerCert.CertKey + if len(keyCert.CertFile) != 0 || len(keyCert.KeyFile) != 0 { + return nil + } + + canReadCertAndKey := false + if len(s.ServerCert.CertDirectory) > 0 { + if len(s.ServerCert.PairName) == 0 { + return fmt.Errorf("PairName is required if CertDirectory is set") + } + keyCert.CertFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".crt") + keyCert.KeyFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".key") + if canRead, err := certutil.CanReadCertAndKey(keyCert.CertFile, keyCert.KeyFile); err != nil { + return err + } else { + canReadCertAndKey = canRead + } + } + + if !canReadCertAndKey { + // add either the bind address or localhost to the valid alternates + if s.BindAddress.IsUnspecified() { + alternateDNS = append(alternateDNS, "localhost") + } else { + alternateIPs = append(alternateIPs, s.BindAddress) + } + + if cert, key, err := certutil.GenerateSelfSignedCertKeyWithFixtures(publicAddress, alternateIPs, alternateDNS, s.ServerCert.FixtureDirectory); err != nil { + return fmt.Errorf("unable to generate self signed cert: %v", err) + } else if len(keyCert.CertFile) > 0 && len(keyCert.KeyFile) > 0 { + if err := certutil.WriteCert(keyCert.CertFile, cert); err != nil { + return err + } + if err := keyutil.WriteKey(keyCert.KeyFile, key); err != nil { + return err + } + klog.Infof("Generated self-signed cert (%s, %s)", keyCert.CertFile, keyCert.KeyFile) + } else { + s.ServerCert.GeneratedCert, err = dynamiccertificates.NewStaticCertKeyContent("Generated self signed cert", cert, key) + if err != nil { + return err + } + klog.Infof("Generated self-signed cert in-memory") + } + } + + return nil +} + +func CreateListener(network, addr string, config net.ListenConfig) (net.Listener, int, error) { + if len(network) == 0 { + network = "tcp" + } + + ln, err := config.Listen(context.TODO(), network, addr) + if err != nil { + return nil, 0, fmt.Errorf("failed to listen on %v: %v", addr, err) + } + + // get port + tcpAddr, ok := ln.Addr().(*net.TCPAddr) + if !ok { + ln.Close() + return nil, 0, fmt.Errorf("invalid listen address: %q", ln.Addr().String()) + } + + return ln, tcpAddr.Port, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go new file mode 100644 index 0000000000..221a5474bd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func permitPortReuse(network, addr string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go new file mode 100644 index 0000000000..1941890234 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go @@ -0,0 +1,30 @@ +// +build windows + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "syscall" +) + +// Windows only supports SO_REUSEADDR, which may cause undefined behavior, as +// there is no protection against port hijacking. +func permitPortReuse(network, address string, c syscall.RawConn) error { + return fmt.Errorf("port reuse is not supported on Windows") +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go new file mode 100644 index 0000000000..9f9a42f81b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "github.com/google/uuid" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/client-go/rest" + certutil "k8s.io/client-go/util/cert" +) + +type SecureServingOptionsWithLoopback struct { + *SecureServingOptions +} + +func (o *SecureServingOptions) WithLoopback() *SecureServingOptionsWithLoopback { + return &SecureServingOptionsWithLoopback{o} +} + +// ApplyTo fills up serving information in the server configuration. +func (s *SecureServingOptionsWithLoopback) ApplyTo(secureServingInfo **server.SecureServingInfo, loopbackClientConfig **rest.Config) error { + if s == nil || s.SecureServingOptions == nil || secureServingInfo == nil { + return nil + } + + if err := s.SecureServingOptions.ApplyTo(secureServingInfo); err != nil { + return err + } + + if *secureServingInfo == nil || loopbackClientConfig == nil { + return nil + } + + // create self-signed cert+key with the fake server.LoopbackClientServerNameOverride and + // let the server return it when the loopback client connects. 
+ certPem, keyPem, err := certutil.GenerateSelfSignedCertKey(server.LoopbackClientServerNameOverride, nil, nil) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + certProvider, err := dynamiccertificates.NewStaticSNICertKeyContent("self-signed loopback", certPem, keyPem, server.LoopbackClientServerNameOverride) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + + secureLoopbackClientConfig, err := (*secureServingInfo).NewLoopbackClientConfig(uuid.New().String(), certPem) + switch { + // if we failed and there's no fallback loopback client config, we need to fail + case err != nil && *loopbackClientConfig == nil: + return err + + // if we failed, but we already have a fallback loopback client config (usually insecure), allow it + case err != nil && *loopbackClientConfig != nil: + + default: + *loopbackClientConfig = secureLoopbackClientConfig + // Write to the front of SNICerts so that this overrides any other certs with the same name + (*secureServingInfo).SNICerts = append([]dynamiccertificates.SNICertKeyContentProvider{certProvider}, (*secureServingInfo).SNICerts...) + } + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/plugins.go b/vendor/k8s.io/apiserver/pkg/server/plugins.go new file mode 100644 index 0000000000..7a9094337d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/plugins.go @@ -0,0 +1,32 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +// This file exists to force the desired plugin implementations to be linked into genericapi pkg. +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" +) + +// RegisterAllAdmissionPlugins registers all admission plugins +func RegisterAllAdmissionPlugins(plugins *admission.Plugins) { + lifecycle.Register(plugins) + validatingwebhook.Register(plugins) + mutatingwebhook.Register(plugins) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go new file mode 100644 index 0000000000..0dae21535d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
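As a rough usage sketch for the loopback variant above (it assumes the package's NewSecureServingOptions constructor, which is not part of this hunk; the bind port is a placeholder), ApplyTo yields both the SecureServingInfo and a rest.Config that trusts the generated loopback certificate:

package main

import (
	genericapiserver "k8s.io/apiserver/pkg/server"
	genericoptions "k8s.io/apiserver/pkg/server/options"
	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
)

func main() {
	opts := genericoptions.NewSecureServingOptions().WithLoopback()
	opts.BindPort = 8443

	// Fall back to a generated self-signed serving certificate when none is configured.
	if err := opts.MaybeDefaultWithSelfSignedCerts("localhost", nil, nil); err != nil {
		klog.Fatal(err)
	}

	var serving *genericapiserver.SecureServingInfo
	var loopback *rest.Config
	if err := opts.ApplyTo(&serving, &loopback); err != nil {
		klog.Fatal(err)
	}

	// serving holds the bound listener and certificate providers; loopback is a
	// client-go config pre-wired for in-process calls against this server.
	klog.Infof("serving on %s, loopback host %s", serving.Listener.Addr(), loopback.Host)
}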
+*/ + +// Package resourceconfig contains the resource config related helper functions. +package resourceconfig // import "k8s.io/apiserver/pkg/server/resourceconfig" diff --git a/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go new file mode 100644 index 0000000000..bfcce54b8d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go @@ -0,0 +1,201 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourceconfig + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serverstore "k8s.io/apiserver/pkg/server/storage" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2" +) + +// GroupVersionRegistry provides access to registered group versions. +type GroupVersionRegistry interface { + // IsGroupRegistered returns true if given group is registered. + IsGroupRegistered(group string) bool + // IsVersionRegistered returns true if given version is registered. + IsVersionRegistered(v schema.GroupVersion) bool + // PrioritizedVersionsAllGroups returns all registered group versions. + PrioritizedVersionsAllGroups() []schema.GroupVersion +} + +// MergeResourceEncodingConfigs merges the given defaultResourceConfig with specific GroupVersionResource overrides. 
+func MergeResourceEncodingConfigs( + defaultResourceEncoding *serverstore.DefaultResourceEncodingConfig, + resourceEncodingOverrides []schema.GroupVersionResource, +) *serverstore.DefaultResourceEncodingConfig { + resourceEncodingConfig := defaultResourceEncoding + for _, gvr := range resourceEncodingOverrides { + resourceEncodingConfig.SetResourceEncoding(gvr.GroupResource(), gvr.GroupVersion(), + schema.GroupVersion{Group: gvr.Group, Version: runtime.APIVersionInternal}) + } + return resourceEncodingConfig +} + +// Recognized values for the --runtime-config parameter to enable/disable groups of APIs +const ( + APIAll = "api/all" + APIGA = "api/ga" + APIBeta = "api/beta" + APIAlpha = "api/alpha" +) + +var ( + gaPattern = regexp.MustCompile(`^v\d+$`) + betaPattern = regexp.MustCompile(`^v\d+beta\d+$`) + alphaPattern = regexp.MustCompile(`^v\d+alpha\d+$`) + + matchers = map[string]func(gv schema.GroupVersion) bool{ + // allows users to address all api versions + APIAll: func(gv schema.GroupVersion) bool { return true }, + // allows users to address all api versions in the form v[0-9]+ + APIGA: func(gv schema.GroupVersion) bool { return gaPattern.MatchString(gv.Version) }, + // allows users to address all beta api versions + APIBeta: func(gv schema.GroupVersion) bool { return betaPattern.MatchString(gv.Version) }, + // allows users to address all alpha api versions + APIAlpha: func(gv schema.GroupVersion) bool { return alphaPattern.MatchString(gv.Version) }, + } + + matcherOrder = []string{APIAll, APIGA, APIBeta, APIAlpha} +) + +// MergeAPIResourceConfigs merges the given defaultAPIResourceConfig with the given resourceConfigOverrides. +// Exclude the groups not registered in registry, and check if version is +// not registered in group, then it will fail. +func MergeAPIResourceConfigs( + defaultAPIResourceConfig *serverstore.ResourceConfig, + resourceConfigOverrides cliflag.ConfigurationMap, + registry GroupVersionRegistry, +) (*serverstore.ResourceConfig, error) { + resourceConfig := defaultAPIResourceConfig + overrides := resourceConfigOverrides + + for _, flag := range matcherOrder { + if value, ok := overrides[flag]; ok { + if value == "false" { + resourceConfig.DisableMatchingVersions(matchers[flag]) + } else if value == "true" { + resourceConfig.EnableMatchingVersions(matchers[flag]) + } else { + return nil, fmt.Errorf("invalid value %v=%v", flag, value) + } + } + } + + // "={true|false} allows users to enable/disable API. + // This takes preference over api/all, if specified. + // Iterate through all group/version overrides specified in runtimeConfig. + for key := range overrides { + // Have already handled them above. Can skip them here. + if _, ok := matchers[key]; ok { + continue + } + + tokens := strings.Split(key, "/") + if len(tokens) < 2 { + continue + } + groupVersionString := tokens[0] + "/" + tokens[1] + groupVersion, err := schema.ParseGroupVersion(groupVersionString) + if err != nil { + return nil, fmt.Errorf("invalid key %s", key) + } + + // individual resource enablement/disablement is only supported in the extensions/v1beta1 API group for legacy reasons. + // all other API groups are expected to contain coherent sets of resources that are enabled/disabled together. 
+ if len(tokens) > 2 && (groupVersion != schema.GroupVersion{Group: "extensions", Version: "v1beta1"}) { + klog.Warningf("ignoring invalid key %s, individual resource enablement/disablement is not supported in %s, and will prevent starting in future releases", key, groupVersion.String()) + continue + } + + // Exclude group not registered into the registry. + if !registry.IsGroupRegistered(groupVersion.Group) { + continue + } + + // Verify that the groupVersion is registered into registry. + if !registry.IsVersionRegistered(groupVersion) { + return nil, fmt.Errorf("group version %s that has not been registered", groupVersion.String()) + } + enabled, err := getRuntimeConfigValue(overrides, key, false) + if err != nil { + return nil, err + } + if enabled { + // enable the groupVersion for "group/version=true" and "group/version/resource=true" + resourceConfig.EnableVersions(groupVersion) + } else if len(tokens) == 2 { + // disable the groupVersion only for "group/version=false", not "group/version/resource=false" + resourceConfig.DisableVersions(groupVersion) + } + + if len(tokens) < 3 { + continue + } + groupVersionResource := groupVersion.WithResource(tokens[2]) + if enabled { + resourceConfig.EnableResources(groupVersionResource) + } else { + resourceConfig.DisableResources(groupVersionResource) + } + } + + return resourceConfig, nil +} + +func getRuntimeConfigValue(overrides cliflag.ConfigurationMap, apiKey string, defaultValue bool) (bool, error) { + flagValue, ok := overrides[apiKey] + if ok { + if flagValue == "" { + return true, nil + } + boolValue, err := strconv.ParseBool(flagValue) + if err != nil { + return false, fmt.Errorf("invalid value of %s: %s, err: %v", apiKey, flagValue, err) + } + return boolValue, nil + } + return defaultValue, nil +} + +// ParseGroups takes in resourceConfig and returns parsed groups. +func ParseGroups(resourceConfig cliflag.ConfigurationMap) ([]string, error) { + groups := []string{} + for key := range resourceConfig { + if _, ok := matchers[key]; ok { + continue + } + tokens := strings.Split(key, "/") + if len(tokens) != 2 && len(tokens) != 3 { + return groups, fmt.Errorf("runtime-config invalid key %s", key) + } + groupVersionString := tokens[0] + "/" + tokens[1] + groupVersion, err := schema.ParseGroupVersion(groupVersionString) + if err != nil { + return nil, fmt.Errorf("runtime-config invalid key %s", key) + } + groups = append(groups, groupVersion.Group) + } + + return groups, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/OWNERS b/vendor/k8s.io/apiserver/pkg/server/routes/OWNERS new file mode 100644 index 0000000000..4da107c8c3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sttts diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/doc.go b/vendor/k8s.io/apiserver/pkg/server/routes/doc.go new file mode 100644 index 0000000000..603e899cde --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
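To make the override semantics of MergeAPIResourceConfigs concrete, a short sketch with a stub GroupVersionRegistry (the stub and the group/version names are illustrative, not part of the vendored code): api/beta disables every beta version, and an explicit group/version key then takes precedence:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/server/resourceconfig"
	serverstore "k8s.io/apiserver/pkg/server/storage"
	cliflag "k8s.io/component-base/cli/flag"
)

// stubRegistry satisfies GroupVersionRegistry for the example.
type stubRegistry struct{ gvs []schema.GroupVersion }

func (r stubRegistry) IsGroupRegistered(group string) bool {
	for _, gv := range r.gvs {
		if gv.Group == group {
			return true
		}
	}
	return false
}

func (r stubRegistry) IsVersionRegistered(v schema.GroupVersion) bool {
	for _, gv := range r.gvs {
		if gv == v {
			return true
		}
	}
	return false
}

func (r stubRegistry) PrioritizedVersionsAllGroups() []schema.GroupVersion { return r.gvs }

func main() {
	registry := stubRegistry{gvs: []schema.GroupVersion{
		{Group: "apps", Version: "v1"},
		{Group: "batch", Version: "v1beta1"},
		{Group: "autoscaling", Version: "v2beta2"},
	}}

	defaults := serverstore.NewResourceConfig()
	defaults.EnableVersions(registry.gvs...)

	// Equivalent to --runtime-config=api/beta=false,batch/v1beta1=true.
	overrides := cliflag.ConfigurationMap{
		"api/beta":      "false",
		"batch/v1beta1": "true",
	}

	cfg, err := resourceconfig.MergeAPIResourceConfigs(defaults, overrides, registry)
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.VersionEnabled(schema.GroupVersion{Group: "apps", Version: "v1"}))             // true, untouched
	fmt.Println(cfg.VersionEnabled(schema.GroupVersion{Group: "autoscaling", Version: "v2beta2"})) // false, disabled by api/beta
	fmt.Println(cfg.VersionEnabled(schema.GroupVersion{Group: "batch", Version: "v1beta1"}))       // true, explicit key wins
}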
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package routes holds a collection of optional genericapiserver http handlers. +package routes // import "k8s.io/apiserver/pkg/server/routes" diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/flags.go b/vendor/k8s.io/apiserver/pkg/server/routes/flags.go new file mode 100644 index 0000000000..55835a6e85 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/flags.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "fmt" + "html/template" + "io/ioutil" + "net/http" + "path" + "sync" + + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/server/mux" +) + +var ( + lock = &sync.RWMutex{} + registeredFlags = map[string]debugFlag{} +) + +// DebugFlags adds handlers for flags under /debug/flags. +type DebugFlags struct { +} + +// Install registers the APIServer's flags handler. +func (f DebugFlags) Install(c *mux.PathRecorderMux, flag string, handler func(http.ResponseWriter, *http.Request)) { + c.UnlistedHandle("/debug/flags", http.HandlerFunc(f.Index)) + c.UnlistedHandlePrefix("/debug/flags/", http.HandlerFunc(f.Index)) + + url := path.Join("/debug/flags", flag) + c.UnlistedHandleFunc(url, handler) + + f.addFlag(flag) +} + +// Index responds with the `/debug/flags` request. +// For example, "/debug/flags/v" serves the "--v" flag. +// Index responds to a request for "/debug/flags/" with an HTML page +// listing the available flags. +func (f DebugFlags) Index(w http.ResponseWriter, r *http.Request) { + lock.RLock() + defer lock.RUnlock() + if err := indexTmpl.Execute(w, registeredFlags); err != nil { + klog.Error(err) + } +} + +var indexTmpl = template.Must(template.New("index").Parse(` + +/debug/flags/ + + +/debug/flags/
+<br>
+flags:<br>
+<table>
+{{range .}}
+<tr>{{.Flag}}<br></tr>
+{{end}}
+</table>
+<br>
+full flags configurable<br>
+ + +`)) + +type debugFlag struct { + Flag string +} + +func (f DebugFlags) addFlag(flag string) { + lock.Lock() + defer lock.Unlock() + registeredFlags[flag] = debugFlag{flag} +} + +// StringFlagSetterFunc is a func used for setting string type flag. +type StringFlagSetterFunc func(string) (string, error) + +// StringFlagPutHandler wraps an http Handler to set string type flag. +func StringFlagPutHandler(setter StringFlagSetterFunc) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + switch { + case req.Method == "PUT": + body, err := ioutil.ReadAll(req.Body) + if err != nil { + writePlainText(http.StatusBadRequest, "error reading request body: "+err.Error(), w) + return + } + defer req.Body.Close() + response, err := setter(string(body)) + if err != nil { + writePlainText(http.StatusBadRequest, err.Error(), w) + return + } + writePlainText(http.StatusOK, response, w) + return + default: + writePlainText(http.StatusNotAcceptable, "unsupported http method", w) + return + } + }) +} + +// writePlainText renders a simple string response. +func writePlainText(statusCode int, text string, w http.ResponseWriter) { + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(statusCode) + fmt.Fprintln(w, text) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/index.go b/vendor/k8s.io/apiserver/pkg/server/routes/index.go new file mode 100644 index 0000000000..1407579886 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/index.go @@ -0,0 +1,69 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "net/http" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/server/mux" +) + +// ListedPathProvider is an interface for providing paths that should be reported at /. +type ListedPathProvider interface { + // ListedPaths is an alphabetically sorted list of paths to be reported at /. + ListedPaths() []string +} + +// ListedPathProviders is a convenient way to combine multiple ListedPathProviders +type ListedPathProviders []ListedPathProvider + +// ListedPaths unions and sorts the included paths. +func (p ListedPathProviders) ListedPaths() []string { + ret := sets.String{} + for _, provider := range p { + for _, path := range provider.ListedPaths() { + ret.Insert(path) + } + } + + return ret.List() +} + +// Index provides a webservice for the http root / listing all known paths. +type Index struct{} + +// Install adds the Index webservice to the given mux. 
+func (i Index) Install(pathProvider ListedPathProvider, mux *mux.PathRecorderMux) { + handler := IndexLister{StatusCode: http.StatusOK, PathProvider: pathProvider} + + mux.UnlistedHandle("/", handler) + mux.UnlistedHandle("/index.html", handler) +} + +// IndexLister lists the available indexes with the status code provided +type IndexLister struct { + StatusCode int + PathProvider ListedPathProvider +} + +// ServeHTTP serves the available paths. +func (i IndexLister) ServeHTTP(w http.ResponseWriter, r *http.Request) { + responsewriters.WriteRawJSON(i.StatusCode, metav1.RootPaths{Paths: i.PathProvider.ListedPaths()}, w) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go new file mode 100644 index 0000000000..1121e95c36 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + apimetrics "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/server/mux" + etcd3metrics "k8s.io/apiserver/pkg/storage/etcd3/metrics" + flowcontrolmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// DefaultMetrics installs the default prometheus metrics handler +type DefaultMetrics struct{} + +// Install adds the DefaultMetrics handler +func (m DefaultMetrics) Install(c *mux.PathRecorderMux) { + register() + c.Handle("/metrics", legacyregistry.Handler()) +} + +// MetricsWithReset install the prometheus metrics handler extended with support for the DELETE method +// which resets the metrics. +type MetricsWithReset struct{} + +// Install adds the MetricsWithReset handler +func (m MetricsWithReset) Install(c *mux.PathRecorderMux) { + register() + c.Handle("/metrics", legacyregistry.HandlerWithReset()) +} + +// register apiserver and etcd metrics +func register() { + apimetrics.Register() + etcd3metrics.Register() + flowcontrolmetrics.Register() +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go new file mode 100644 index 0000000000..c9fb43b959 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
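A sketch of how the route installers above are typically attached to a PathRecorderMux (it assumes the mux package's NewPathRecorderMux constructor, which is not part of this hunk; the mux name, listen address, and flag setter are placeholders):

package main

import (
	"net/http"

	"k8s.io/apiserver/pkg/server/mux"
	"k8s.io/apiserver/pkg/server/routes"
	"k8s.io/klog/v2"
)

func main() {
	m := mux.NewPathRecorderMux("example-server")

	// /metrics backed by the legacy registry.
	routes.DefaultMetrics{}.Install(m)

	// /debug/flags/v with a placeholder setter handler.
	routes.DebugFlags{}.Install(m, "v", routes.StringFlagPutHandler(func(val string) (string, error) {
		return "verbosity set to " + val, nil
	}))

	// "/" and "/index.html" list every path registered on the mux.
	routes.Index{}.Install(m, m)

	klog.Fatal(http.ListenAndServe("127.0.0.1:8080", m))
}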
+*/ + +package routes + +import ( + restful "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/server/mux" + "k8s.io/kube-openapi/pkg/builder" + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/handler" +) + +// OpenAPI installs spec endpoints for each web service. +type OpenAPI struct { + Config *common.Config +} + +// Install adds the SwaggerUI webservice to the given mux. +func (oa OpenAPI) Install(c *restful.Container, mux *mux.PathRecorderMux) (*handler.OpenAPIService, *spec.Swagger) { + spec, err := builder.BuildOpenAPISpec(c.RegisteredWebServices(), oa.Config) + if err != nil { + klog.Fatalf("Failed to build open api spec for root: %v", err) + } + spec.Definitions = handler.PruneDefaults(spec.Definitions) + openAPIVersionedService, err := handler.NewOpenAPIService(spec) + if err != nil { + klog.Fatalf("Failed to create OpenAPIService: %v", err) + } + + err = openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", mux) + if err != nil { + klog.Fatalf("Failed to register versioned open api spec for root: %v", err) + } + + return openAPIVersionedService, spec +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/profiling.go b/vendor/k8s.io/apiserver/pkg/server/routes/profiling.go new file mode 100644 index 0000000000..b57d590f52 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/profiling.go @@ -0,0 +1,43 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "net/http" + "net/http/pprof" + + "k8s.io/apiserver/pkg/server/mux" +) + +// Profiling adds handlers for pprof under /debug/pprof. +type Profiling struct{} + +// Install adds the Profiling webservice to the given mux. +func (d Profiling) Install(c *mux.PathRecorderMux) { + c.UnlistedHandleFunc("/debug/pprof", redirectTo("/debug/pprof/")) + c.UnlistedHandlePrefix("/debug/pprof/", http.HandlerFunc(pprof.Index)) + c.UnlistedHandleFunc("/debug/pprof/profile", pprof.Profile) + c.UnlistedHandleFunc("/debug/pprof/symbol", pprof.Symbol) + c.UnlistedHandleFunc("/debug/pprof/trace", pprof.Trace) +} + +// redirectTo redirects request to a certain destination. +func redirectTo(to string) func(http.ResponseWriter, *http.Request) { + return func(rw http.ResponseWriter, req *http.Request) { + http.Redirect(rw, req, to, http.StatusFound) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/version.go b/vendor/k8s.io/apiserver/pkg/server/routes/version.go new file mode 100644 index 0000000000..dd9ab0dcfe --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/version.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "net/http" + + "github.com/emicklei/go-restful" + + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// Version provides a webservice with version information. +type Version struct { + Version *version.Info +} + +// Install registers the APIServer's `/version` handler. +func (v Version) Install(c *restful.Container) { + if v.Version == nil { + return + } + + // Set up a service to return the git code version. + versionWS := new(restful.WebService) + versionWS.Path("/version") + versionWS.Doc("git code version from which this is built") + versionWS.Route( + versionWS.GET("/").To(v.handleVersion). + Doc("get the code version"). + Operation("getCodeVersion"). + Produces(restful.MIME_JSON). + Consumes(restful.MIME_JSON). + Writes(version.Info{})) + + c.Add(versionWS) +} + +// handleVersion writes the server's version information. +func (v Version) handleVersion(req *restful.Request, resp *restful.Response) { + responsewriters.WriteRawJSON(http.StatusOK, *v.Version, resp.ResponseWriter) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go new file mode 100644 index 0000000000..38341eb03b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go @@ -0,0 +1,289 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/net/http2" + "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/server/dynamiccertificates" +) + +const ( + defaultKeepAlivePeriod = 3 * time.Minute +) + +// tlsConfig produces the tls.Config to serve with. 
+func (s *SecureServingInfo) tlsConfig(stopCh <-chan struct{}) (*tls.Config, error) { + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + // enable HTTP2 for go's 1.7 HTTP Server + NextProtos: []string{"h2", "http/1.1"}, + } + + // these are static aspects of the tls.Config + if s.DisableHTTP2 { + klog.Info("Forcing use of http/1.1 only") + tlsConfig.NextProtos = []string{"http/1.1"} + } + if s.MinTLSVersion > 0 { + tlsConfig.MinVersion = s.MinTLSVersion + } + if len(s.CipherSuites) > 0 { + tlsConfig.CipherSuites = s.CipherSuites + insecureCiphers := flag.InsecureTLSCiphers() + for i := 0; i < len(s.CipherSuites); i++ { + for cipherName, cipherID := range insecureCiphers { + if s.CipherSuites[i] == cipherID { + klog.Warningf("Use of insecure cipher '%s' detected.", cipherName) + } + } + } + } + + if s.ClientCA != nil { + // Populate PeerCertificates in requests, but don't reject connections without certificates + // This allows certificates to be validated by authenticators, while still allowing other auth types + tlsConfig.ClientAuth = tls.RequestClientCert + } + + if s.ClientCA != nil || s.Cert != nil || len(s.SNICerts) > 0 { + dynamicCertificateController := dynamiccertificates.NewDynamicServingCertificateController( + tlsConfig, + s.ClientCA, + s.Cert, + s.SNICerts, + nil, // TODO see how to plumb an event recorder down in here. For now this results in simply klog messages. + ) + // register if possible + if notifier, ok := s.ClientCA.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + if notifier, ok := s.Cert.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + // start controllers if possible + if controller, ok := s.ClientCA.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of client CA failed: %v", err) + } + + go controller.Run(1, stopCh) + } + if controller, ok := s.Cert.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of default serving certificate failed: %v", err) + } + + go controller.Run(1, stopCh) + } + for _, sniCert := range s.SNICerts { + if notifier, ok := sniCert.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + + if controller, ok := sniCert.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of SNI serving certificate failed: %v", err) + } + + go controller.Run(1, stopCh) + } + } + + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. 
+ if err := dynamicCertificateController.RunOnce(); err != nil { + klog.Warningf("Initial population of dynamic certificates failed: %v", err) + } + go dynamicCertificateController.Run(1, stopCh) + + tlsConfig.GetConfigForClient = dynamicCertificateController.GetConfigForClient + } + + return tlsConfig, nil +} + +// Serve runs the secure http server. It fails only if certificates cannot be loaded or the initial listen call fails. +// The actual server loop (stoppable by closing stopCh) runs in a go routine, i.e. Serve does not block. +// It returns a stoppedCh that is closed when all non-hijacked active requests have been processed. +func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) (<-chan struct{}, error) { + if s.Listener == nil { + return nil, fmt.Errorf("listener must not be nil") + } + + tlsConfig, err := s.tlsConfig(stopCh) + if err != nil { + return nil, err + } + + secureServer := &http.Server{ + Addr: s.Listener.Addr().String(), + Handler: handler, + MaxHeaderBytes: 1 << 20, + TLSConfig: tlsConfig, + } + + // At least 99% of serialized resources in surveyed clusters were smaller than 256kb. + // This should be big enough to accommodate most API POST requests in a single frame, + // and small enough to allow a per connection buffer of this size multiplied by `MaxConcurrentStreams`. + const resourceBody99Percentile = 256 * 1024 + + http2Options := &http2.Server{} + + // shrink the per-stream buffer and max framesize from the 1MB default while still accommodating most API POST requests in a single frame + http2Options.MaxUploadBufferPerStream = resourceBody99Percentile + http2Options.MaxReadFrameSize = resourceBody99Percentile + + // use the overridden concurrent streams setting or make the default of 250 explicit so we can size MaxUploadBufferPerConnection appropriately + if s.HTTP2MaxStreamsPerConnection > 0 { + http2Options.MaxConcurrentStreams = uint32(s.HTTP2MaxStreamsPerConnection) + } else { + http2Options.MaxConcurrentStreams = 250 + } + + // increase the connection buffer size from the 1MB default to handle the specified number of concurrent streams + http2Options.MaxUploadBufferPerConnection = http2Options.MaxUploadBufferPerStream * int32(http2Options.MaxConcurrentStreams) + + if !s.DisableHTTP2 { + // apply settings to the server + if err := http2.ConfigureServer(secureServer, http2Options); err != nil { + return nil, fmt.Errorf("error configuring http2: %v", err) + } + } + + // use tlsHandshakeErrorWriter to handle messages of tls handshake error + tlsErrorWriter := &tlsHandshakeErrorWriter{os.Stderr} + tlsErrorLogger := log.New(tlsErrorWriter, "", 0) + secureServer.ErrorLog = tlsErrorLogger + + klog.Infof("Serving securely on %s", secureServer.Addr) + return RunServer(secureServer, s.Listener, shutdownTimeout, stopCh) +} + +// RunServer spawns a go-routine continuously serving until the stopCh is +// closed. +// It returns a stoppedCh that is closed when all non-hijacked active requests +// have been processed. +// This function does not block +// TODO: make private when insecure serving is gone from the kube-apiserver +func RunServer( + server *http.Server, + ln net.Listener, + shutDownTimeout time.Duration, + stopCh <-chan struct{}, +) (<-chan struct{}, error) { + if ln == nil { + return nil, fmt.Errorf("listener must not be nil") + } + + // Shutdown server gracefully. 
+ stoppedCh := make(chan struct{}) + go func() { + defer close(stoppedCh) + <-stopCh + ctx, cancel := context.WithTimeout(context.Background(), shutDownTimeout) + server.Shutdown(ctx) + cancel() + }() + + go func() { + defer utilruntime.HandleCrash() + + var listener net.Listener + listener = tcpKeepAliveListener{ln} + if server.TLSConfig != nil { + listener = tls.NewListener(listener, server.TLSConfig) + } + + err := server.Serve(listener) + + msg := fmt.Sprintf("Stopped listening on %s", ln.Addr().String()) + select { + case <-stopCh: + klog.Info(msg) + default: + panic(fmt.Sprintf("%s due to error: %v", msg, err)) + } + }() + + return stoppedCh, nil +} + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// +// Copied from Go 1.7.2 net/http/server.go +type tcpKeepAliveListener struct { + net.Listener +} + +func (ln tcpKeepAliveListener) Accept() (net.Conn, error) { + c, err := ln.Listener.Accept() + if err != nil { + return nil, err + } + if tc, ok := c.(*net.TCPConn); ok { + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(defaultKeepAlivePeriod) + } + return c, nil +} + +// tlsHandshakeErrorWriter writes TLS handshake errors to klog with +// trace level - V(5), to avoid flooding of tls handshake errors. +type tlsHandshakeErrorWriter struct { + out io.Writer +} + +const tlsHandshakeErrorPrefix = "http: TLS handshake error" + +func (w *tlsHandshakeErrorWriter) Write(p []byte) (int, error) { + if strings.Contains(string(p), tlsHandshakeErrorPrefix) { + klog.V(5).Info(string(p)) + metrics.TLSHandshakeErrors.Inc() + return len(p), nil + } + + // for non tls handshake error, log it as usual + return w.out.Write(p) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/signal.go b/vendor/k8s.io/apiserver/pkg/server/signal.go new file mode 100644 index 0000000000..e5334ae4c1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/signal.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + "os" + "os/signal" +) + +var onlyOneSignalHandler = make(chan struct{}) +var shutdownHandler chan os.Signal + +// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned +// which is closed on one of these signals. If a second signal is caught, the program +// is terminated with exit code 1. +// Only one of SetupSignalContext and SetupSignalHandler should be called, and only can +// be called once. +func SetupSignalHandler() <-chan struct{} { + return SetupSignalContext().Done() +} + +// SetupSignalContext is same as SetupSignalHandler, but a context.Context is returned. +// Only one of SetupSignalContext and SetupSignalHandler should be called, and only can +// be called once. 
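A sketch that ties RunServer to the signal helpers described above: the stop channel from SetupSignalHandler drives a graceful shutdown, and stoppedCh closes once in-flight requests have drained (the address, handler, and timeout are placeholders):

package main

import (
	"net"
	"net/http"
	"time"

	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/klog/v2"
)

func main() {
	// Closed on the first SIGTERM/SIGINT; a second signal exits immediately.
	stopCh := genericapiserver.SetupSignalHandler()

	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		klog.Fatal(err)
	}

	srv := &http.Server{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		}),
	}

	// RunServer does not block; it calls Shutdown when stopCh closes.
	stoppedCh, err := genericapiserver.RunServer(srv, ln, 10*time.Second, stopCh)
	if err != nil {
		klog.Fatal(err)
	}

	<-stopCh
	<-stoppedCh
}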
+func SetupSignalContext() context.Context { + close(onlyOneSignalHandler) // panics when called twice + + shutdownHandler = make(chan os.Signal, 2) + + ctx, cancel := context.WithCancel(context.Background()) + signal.Notify(shutdownHandler, shutdownSignals...) + go func() { + <-shutdownHandler + cancel() + <-shutdownHandler + os.Exit(1) // second signal. Exit directly. + }() + + return ctx +} + +// RequestShutdown emulates a received event that is considered as shutdown signal (SIGTERM/SIGINT) +// This returns whether a handler was notified +func RequestShutdown() bool { + if shutdownHandler != nil { + select { + case shutdownHandler <- shutdownSignals[0]: + return true + default: + } + } + + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/server/signal_posix.go b/vendor/k8s.io/apiserver/pkg/server/signal_posix.go new file mode 100644 index 0000000000..11b3bba65f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/signal_posix.go @@ -0,0 +1,26 @@ +// +build !windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "os" + "syscall" +) + +var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/vendor/k8s.io/apiserver/pkg/server/signal_windows.go b/vendor/k8s.io/apiserver/pkg/server/signal_windows.go new file mode 100644 index 0000000000..e7645a2088 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/signal_windows.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "os" +) + +var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/doc.go b/vendor/k8s.io/apiserver/pkg/server/storage/doc.go new file mode 100644 index 0000000000..36b0d22524 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package storage contains the plumbing to setup the etcd storage of the apiserver. 
+package storage // import "k8s.io/apiserver/pkg/server/storage" diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go b/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go new file mode 100644 index 0000000000..bd85ac230b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go @@ -0,0 +1,124 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APIResourceConfigSource is the interface to determine which groups and versions are enabled +type APIResourceConfigSource interface { + VersionEnabled(version schema.GroupVersion) bool + ResourceEnabled(resource schema.GroupVersionResource) bool + AnyVersionForGroupEnabled(group string) bool +} + +var _ APIResourceConfigSource = &ResourceConfig{} + +type ResourceConfig struct { + GroupVersionConfigs map[schema.GroupVersion]bool + ResourceConfigs map[schema.GroupVersionResource]bool +} + +func NewResourceConfig() *ResourceConfig { + return &ResourceConfig{GroupVersionConfigs: map[schema.GroupVersion]bool{}, ResourceConfigs: map[schema.GroupVersionResource]bool{}} +} + +// DisableAll disables all group/versions. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) DisableAll() { + for k := range o.GroupVersionConfigs { + o.GroupVersionConfigs[k] = false + } +} + +// EnableAll enables all group/versions. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) EnableAll() { + for k := range o.GroupVersionConfigs { + o.GroupVersionConfigs[k] = true + } +} + +// DisableMatchingVersions disables all group/versions for which the matcher function returns true. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) DisableMatchingVersions(matcher func(gv schema.GroupVersion) bool) { + for k := range o.GroupVersionConfigs { + if matcher(k) { + o.GroupVersionConfigs[k] = false + } + } +} + +// EnableMatchingVersions enables all group/versions for which the matcher function returns true. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) EnableMatchingVersions(matcher func(gv schema.GroupVersion) bool) { + for k := range o.GroupVersionConfigs { + if matcher(k) { + o.GroupVersionConfigs[k] = true + } + } +} + +// DisableVersions disables the versions entirely. 
+func (o *ResourceConfig) DisableVersions(versions ...schema.GroupVersion) { + for _, version := range versions { + o.GroupVersionConfigs[version] = false + } +} + +func (o *ResourceConfig) EnableVersions(versions ...schema.GroupVersion) { + for _, version := range versions { + o.GroupVersionConfigs[version] = true + } +} + +func (o *ResourceConfig) VersionEnabled(version schema.GroupVersion) bool { + enabled, _ := o.GroupVersionConfigs[version] + return enabled +} + +func (o *ResourceConfig) DisableResources(resources ...schema.GroupVersionResource) { + for _, resource := range resources { + o.ResourceConfigs[resource] = false + } +} + +func (o *ResourceConfig) EnableResources(resources ...schema.GroupVersionResource) { + for _, resource := range resources { + o.ResourceConfigs[resource] = true + } +} + +func (o *ResourceConfig) ResourceEnabled(resource schema.GroupVersionResource) bool { + if !o.VersionEnabled(resource.GroupVersion()) { + return false + } + resourceEnabled, explicitlySet := o.ResourceConfigs[resource] + if !explicitlySet { + return true + } + return resourceEnabled +} + +func (o *ResourceConfig) AnyVersionForGroupEnabled(group string) bool { + for version := range o.GroupVersionConfigs { + if version.Group == group { + if o.VersionEnabled(version) { + return true + } + } + } + + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go new file mode 100644 index 0000000000..efb22fbc8d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type ResourceEncodingConfig interface { + // StorageEncoding returns the serialization format for the resource. + // TODO this should actually return a GroupVersionKind since you can logically have multiple "matching" Kinds + // For now, it returns just the GroupVersion for consistency with old behavior + StorageEncodingFor(schema.GroupResource) (schema.GroupVersion, error) + + // InMemoryEncodingFor returns the groupVersion for the in memory representation the storage should convert to. + InMemoryEncodingFor(schema.GroupResource) (schema.GroupVersion, error) +} + +type DefaultResourceEncodingConfig struct { + // resources records the overriding encoding configs for individual resources. 
+ resources map[schema.GroupResource]*OverridingResourceEncoding + scheme *runtime.Scheme +} + +type OverridingResourceEncoding struct { + ExternalResourceEncoding schema.GroupVersion + InternalResourceEncoding schema.GroupVersion +} + +var _ ResourceEncodingConfig = &DefaultResourceEncodingConfig{} + +func NewDefaultResourceEncodingConfig(scheme *runtime.Scheme) *DefaultResourceEncodingConfig { + return &DefaultResourceEncodingConfig{resources: map[schema.GroupResource]*OverridingResourceEncoding{}, scheme: scheme} +} + +func (o *DefaultResourceEncodingConfig) SetResourceEncoding(resourceBeingStored schema.GroupResource, externalEncodingVersion, internalVersion schema.GroupVersion) { + o.resources[resourceBeingStored] = &OverridingResourceEncoding{ + ExternalResourceEncoding: externalEncodingVersion, + InternalResourceEncoding: internalVersion, + } +} + +func (o *DefaultResourceEncodingConfig) StorageEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) { + if !o.scheme.IsGroupRegistered(resource.Group) { + return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group) + } + + resourceOverride, resourceExists := o.resources[resource] + if resourceExists { + return resourceOverride.ExternalResourceEncoding, nil + } + + // return the most preferred external version for the group + return o.scheme.PrioritizedVersionsForGroup(resource.Group)[0], nil +} + +func (o *DefaultResourceEncodingConfig) InMemoryEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) { + if !o.scheme.IsGroupRegistered(resource.Group) { + return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group) + } + + resourceOverride, resourceExists := o.resources[resource] + if resourceExists { + return resourceOverride.InternalResourceEncoding, nil + } + return schema.GroupVersion{Group: resource.Group, Version: runtime.APIVersionInternal}, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go new file mode 100644 index 0000000000..96faa17122 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
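For reference, the enablement semantics implemented by ResourceConfig above, shown with arbitrary example group/versions and resources:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	serverstore "k8s.io/apiserver/pkg/server/storage"
)

func main() {
	cfg := serverstore.NewResourceConfig()

	appsV1 := schema.GroupVersion{Group: "apps", Version: "v1"}
	cfg.EnableVersions(appsV1)

	// A resource is served when its version is enabled and it was not explicitly disabled.
	fmt.Println(cfg.ResourceEnabled(appsV1.WithResource("deployments"))) // true

	// Disabling one resource leaves the rest of the version enabled.
	cfg.DisableResources(appsV1.WithResource("daemonsets"))
	fmt.Println(cfg.ResourceEnabled(appsV1.WithResource("daemonsets")))  // false
	fmt.Println(cfg.ResourceEnabled(appsV1.WithResource("deployments"))) // true

	// Disabling the whole version wins over per-resource settings.
	cfg.DisableVersions(appsV1)
	fmt.Println(cfg.ResourceEnabled(appsV1.WithResource("deployments"))) // false
}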
+*/ + +package storage + +import ( + "fmt" + "mime" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apiserver/pkg/storage/storagebackend" +) + +// StorageCodecConfig are the arguments passed to newStorageCodecFn +type StorageCodecConfig struct { + StorageMediaType string + StorageSerializer runtime.StorageSerializer + StorageVersion schema.GroupVersion + MemoryVersion schema.GroupVersion + Config storagebackend.Config + + EncoderDecoratorFn func(runtime.Encoder) runtime.Encoder + DecoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder +} + +// NewStorageCodec assembles a storage codec for the provided storage media type, the provided serializer, and the requested +// storage and memory versions. +func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, runtime.GroupVersioner, error) { + mediaType, _, err := mime.ParseMediaType(opts.StorageMediaType) + if err != nil { + return nil, nil, fmt.Errorf("%q is not a valid mime-type", opts.StorageMediaType) + } + + serializer, ok := runtime.SerializerInfoForMediaType(opts.StorageSerializer.SupportedMediaTypes(), mediaType) + if !ok { + return nil, nil, fmt.Errorf("unable to find serializer for %q", mediaType) + } + + s := serializer.Serializer + + // Give callers the opportunity to wrap encoders and decoders. For decoders, each returned decoder will + // be passed to the recognizer so that multiple decoders are available. + var encoder runtime.Encoder = s + if opts.EncoderDecoratorFn != nil { + encoder = opts.EncoderDecoratorFn(encoder) + } + decoders := []runtime.Decoder{ + // selected decoder as the primary + s, + // universal deserializer as a fallback + opts.StorageSerializer.UniversalDeserializer(), + // base64-wrapped universal deserializer as a last resort. + // this allows reading base64-encoded protobuf, which should only exist if etcd2+protobuf was used at some point. + // data written that way could exist in etcd2, or could have been migrated to etcd3. + // TODO: flag this type of data if we encounter it, require migration (read to decode, write to persist using a supported encoder), and remove in 1.8 + runtime.NewBase64Serializer(nil, opts.StorageSerializer.UniversalDeserializer()), + } + if opts.DecoderDecoratorFn != nil { + decoders = opts.DecoderDecoratorFn(decoders) + } + + encodeVersioner := runtime.NewMultiGroupVersioner( + opts.StorageVersion, + schema.GroupKind{Group: opts.StorageVersion.Group}, + schema.GroupKind{Group: opts.MemoryVersion.Group}, + ) + + // Ensure the storage receives the correct version. + encoder = opts.StorageSerializer.EncoderForVersion( + encoder, + encodeVersioner, + ) + decoder := opts.StorageSerializer.DecoderToVersion( + recognizer.NewDecoder(decoders...), + runtime.NewCoercingMultiGroupVersioner( + opts.MemoryVersion, + schema.GroupKind{Group: opts.MemoryVersion.Group}, + schema.GroupKind{Group: opts.StorageVersion.Group}, + ), + ) + + return runtime.NewCodec(encoder, decoder), encodeVersioner, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go new file mode 100644 index 0000000000..689b513223 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -0,0 +1,350 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "strings" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +// Backend describes the storage servers, the information here should be enough +// for health validations. +type Backend struct { + // the url of storage backend like: https://etcd.domain:2379 + Server string + // the required tls config + TLSConfig *tls.Config +} + +// StorageFactory is the interface to locate the storage for a given GroupResource +type StorageFactory interface { + // New finds the storage destination for the given group and resource. It will + // return an error if the group has no storage destination configured. + NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error) + + // ResourcePrefix returns the overridden resource prefix for the GroupResource + // This allows for cohabitation of resources with different native types and provides + // centralized control over the shape of etcd directories + ResourcePrefix(groupResource schema.GroupResource) string + + // Backends gets all backends for all registered storage destinations. + // Used for getting all instances for health validations. + Backends() []Backend +} + +// DefaultStorageFactory takes a GroupResource and returns back its storage interface. This result includes: +// 1. Merged etcd config, including: auth, server locations, prefixes +// 2. Resource encodings for storage: group,version,kind to store as +// 3. Cohabitating default: some resources like hpa are exposed through multiple APIs. They must agree on 1 and 2 +type DefaultStorageFactory struct { + // StorageConfig describes how to create a storage backend in general. + // Its authentication information will be used for every storage.Interface returned. + StorageConfig storagebackend.Config + + Overrides map[schema.GroupResource]groupResourceOverrides + + DefaultResourcePrefixes map[schema.GroupResource]string + + // DefaultMediaType is the media type used to store resources. If it is not set, "application/json" is used. + DefaultMediaType string + + // DefaultSerializer is used to create encoders and decoders for the storage.Interface. + DefaultSerializer runtime.StorageSerializer + + // ResourceEncodingConfig describes how to encode a particular GroupVersionResource + ResourceEncodingConfig ResourceEncodingConfig + + // APIResourceConfigSource indicates whether the *storage* is enabled, NOT the API + // This is discrete from resource enablement because those are separate concerns. How this source is configured + // is left to the caller. + APIResourceConfigSource APIResourceConfigSource + + // newStorageCodecFn exists to be overwritten for unit testing. 
+ newStorageCodecFn func(opts StorageCodecConfig) (codec runtime.Codec, encodeVersioner runtime.GroupVersioner, err error)
+}
+
+type groupResourceOverrides struct {
+ // etcdLocation contains the list of "special" locations that are used for particular GroupResources.
+ // These are merged on top of the StorageConfig when requesting the storage.Interface for a given GroupResource.
+ etcdLocation []string
+ // etcdPrefix is the base location for a GroupResource.
+ etcdPrefix string
+ // etcdResourcePrefix is the location to use to store a particular type under the `etcdPrefix` location.
+ // If empty, the default mapping is used. If the default mapping doesn't contain an entry, it will use
+ // the ToLowered name of the resource, not including the group.
+ etcdResourcePrefix string
+ // mediaType is the desired serializer to choose. If empty, the default is chosen.
+ mediaType string
+ // serializer contains the list of "special" serializers for a GroupResource. Resource=* means for the entire group.
+ serializer runtime.StorageSerializer
+ // cohabitatingResources keeps track of which resources must be stored together. This happens when we have multiple ways
+ // of exposing one set of concepts, e.g. autoscaling.HPA and extensions.HPA.
+ // The order of the slice matters! It is the priority order of lookup for finding a storage location.
+ cohabitatingResources []schema.GroupResource
+ // encoderDecoratorFn is optional and may wrap the provided encoder before objects are serialized.
+ encoderDecoratorFn func(runtime.Encoder) runtime.Encoder
+ // decoderDecoratorFn is optional and may wrap the provided decoders (it can add new decoders). The order of
+ // the returned decoders is the priority order in which decoding is attempted.
+ decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder
+ // transformer is optional and is used to encrypt the resource at rest.
+ transformer value.Transformer
+ // disablePaging will prevent paging on the provided resource.
+ disablePaging bool +} + +// Apply overrides the provided config and options if the override has a value in that position +func (o groupResourceOverrides) Apply(config *storagebackend.Config, options *StorageCodecConfig) { + if len(o.etcdLocation) > 0 { + config.Transport.ServerList = o.etcdLocation + } + if len(o.etcdPrefix) > 0 { + config.Prefix = o.etcdPrefix + } + + if len(o.mediaType) > 0 { + options.StorageMediaType = o.mediaType + } + if o.serializer != nil { + options.StorageSerializer = o.serializer + } + if o.encoderDecoratorFn != nil { + options.EncoderDecoratorFn = o.encoderDecoratorFn + } + if o.decoderDecoratorFn != nil { + options.DecoderDecoratorFn = o.decoderDecoratorFn + } + if o.transformer != nil { + config.Transformer = o.transformer + } + if o.disablePaging { + config.Paging = false + } +} + +var _ StorageFactory = &DefaultStorageFactory{} + +const AllResources = "*" + +func NewDefaultStorageFactory( + config storagebackend.Config, + defaultMediaType string, + defaultSerializer runtime.StorageSerializer, + resourceEncodingConfig ResourceEncodingConfig, + resourceConfig APIResourceConfigSource, + specialDefaultResourcePrefixes map[schema.GroupResource]string, +) *DefaultStorageFactory { + config.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + if len(defaultMediaType) == 0 { + defaultMediaType = runtime.ContentTypeJSON + } + return &DefaultStorageFactory{ + StorageConfig: config, + Overrides: map[schema.GroupResource]groupResourceOverrides{}, + DefaultMediaType: defaultMediaType, + DefaultSerializer: defaultSerializer, + ResourceEncodingConfig: resourceEncodingConfig, + APIResourceConfigSource: resourceConfig, + DefaultResourcePrefixes: specialDefaultResourcePrefixes, + + newStorageCodecFn: NewStorageCodec, + } +} + +func (s *DefaultStorageFactory) SetEtcdLocation(groupResource schema.GroupResource, location []string) { + overrides := s.Overrides[groupResource] + overrides.etcdLocation = location + s.Overrides[groupResource] = overrides +} + +func (s *DefaultStorageFactory) SetEtcdPrefix(groupResource schema.GroupResource, prefix string) { + overrides := s.Overrides[groupResource] + overrides.etcdPrefix = prefix + s.Overrides[groupResource] = overrides +} + +// SetDisableAPIListChunking allows a specific resource to disable paging at the storage layer, to prevent +// exposure of key names in continuations. This may be overridden by feature gates. +func (s *DefaultStorageFactory) SetDisableAPIListChunking(groupResource schema.GroupResource) { + overrides := s.Overrides[groupResource] + overrides.disablePaging = true + s.Overrides[groupResource] = overrides +} + +// SetResourceEtcdPrefix sets the prefix for a resource, but not the base-dir. You'll end up in `etcdPrefix/resourceEtcdPrefix`. 
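The override machinery above (groupResourceOverrides.Apply plus the Set* helpers) layers per-resource settings on top of the shared storagebackend.Config. A hedged sketch of how a server might wire this up, with placeholder etcd endpoints and a bare scheme standing in for real API registration:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	serverstorage "k8s.io/apiserver/pkg/server/storage"
	"k8s.io/apiserver/pkg/storage/storagebackend"
)

// buildFactory is an illustrative helper, not part of this patch.
func buildFactory() *serverstorage.DefaultStorageFactory {
	scheme := runtime.NewScheme()
	codecs := serializer.NewCodecFactory(scheme)

	base := storagebackend.Config{}
	// Assumption: the etcd endpoints below are placeholders for a real deployment.
	base.Transport.ServerList = []string{"https://etcd-main:2379"}

	factory := serverstorage.NewDefaultStorageFactory(
		base,
		runtime.ContentTypeJSON,
		codecs,
		serverstorage.NewDefaultResourceEncodingConfig(scheme),
		serverstorage.NewResourceConfig(),
		nil, // no special default resource prefixes in this sketch
	)

	// Overrides are merged on top of the base config by groupResourceOverrides.Apply:
	// events get their own etcd cluster, everything else keeps the defaults.
	factory.SetEtcdLocation(schema.GroupResource{Resource: "events"}, []string{"https://etcd-events:2379"})
	return factory
}
```

When NewConfig later runs for events, Apply copies the dedicated server list into that resource's config while every other resource keeps the base settings.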
+func (s *DefaultStorageFactory) SetResourceEtcdPrefix(groupResource schema.GroupResource, prefix string) {
+ overrides := s.Overrides[groupResource]
+ overrides.etcdResourcePrefix = prefix
+ s.Overrides[groupResource] = overrides
+}
+
+func (s *DefaultStorageFactory) SetSerializer(groupResource schema.GroupResource, mediaType string, serializer runtime.StorageSerializer) {
+ overrides := s.Overrides[groupResource]
+ overrides.mediaType = mediaType
+ overrides.serializer = serializer
+ s.Overrides[groupResource] = overrides
+}
+
+func (s *DefaultStorageFactory) SetTransformer(groupResource schema.GroupResource, transformer value.Transformer) {
+ overrides := s.Overrides[groupResource]
+ overrides.transformer = transformer
+ s.Overrides[groupResource] = overrides
+}
+
+// AddCohabitatingResources links resources together so that they share a storage location.
+// The order of the slice matters! It is the priority order of lookup for finding a storage location.
+func (s *DefaultStorageFactory) AddCohabitatingResources(groupResources ...schema.GroupResource) {
+ for _, groupResource := range groupResources {
+ overrides := s.Overrides[groupResource]
+ overrides.cohabitatingResources = groupResources
+ s.Overrides[groupResource] = overrides
+ }
+}
+
+func (s *DefaultStorageFactory) AddSerializationChains(encoderDecoratorFn func(runtime.Encoder) runtime.Encoder, decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder, groupResources ...schema.GroupResource) {
+ for _, groupResource := range groupResources {
+ overrides := s.Overrides[groupResource]
+ overrides.encoderDecoratorFn = encoderDecoratorFn
+ overrides.decoderDecoratorFn = decoderDecoratorFn
+ s.Overrides[groupResource] = overrides
+ }
+}
+
+func getAllResourcesAlias(resource schema.GroupResource) schema.GroupResource {
+ return schema.GroupResource{Group: resource.Group, Resource: AllResources}
+}
+
+func (s *DefaultStorageFactory) getStorageGroupResource(groupResource schema.GroupResource) schema.GroupResource {
+ for _, potentialStorageResource := range s.Overrides[groupResource].cohabitatingResources {
+ if s.APIResourceConfigSource.AnyVersionForGroupEnabled(potentialStorageResource.Group) {
+ return potentialStorageResource
+ }
+ }
+
+ return groupResource
+}
+
+// NewConfig finds the storage destination for the given group and resource. It will
+// return an error if the group has no storage destination configured.
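AddCohabitatingResources is what keeps a resource that is exposed through several API groups pointed at a single etcd location. A small illustrative helper (hypothetical, using the historical apps/extensions deployments pair) showing the priority semantics:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	serverstorage "k8s.io/apiserver/pkg/server/storage"
)

// addDeploymentCohabitation is a hypothetical helper, not part of this patch.
// Both GroupResources end up stored in one place; the first group in the slice
// that is enabled in the APIResourceConfigSource decides which one that is
// when getStorageGroupResource resolves the storage resource.
func addDeploymentCohabitation(factory *serverstorage.DefaultStorageFactory) {
	factory.AddCohabitatingResources(
		schema.GroupResource{Group: "apps", Resource: "deployments"},
		schema.GroupResource{Group: "extensions", Resource: "deployments"},
	)
}
```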
+func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error) { + chosenStorageResource := s.getStorageGroupResource(groupResource) + + // operate on copy + storageConfig := s.StorageConfig + codecConfig := StorageCodecConfig{ + StorageMediaType: s.DefaultMediaType, + StorageSerializer: s.DefaultSerializer, + } + + if override, ok := s.Overrides[getAllResourcesAlias(chosenStorageResource)]; ok { + override.Apply(&storageConfig, &codecConfig) + } + if override, ok := s.Overrides[chosenStorageResource]; ok { + override.Apply(&storageConfig, &codecConfig) + } + + var err error + codecConfig.StorageVersion, err = s.ResourceEncodingConfig.StorageEncodingFor(chosenStorageResource) + if err != nil { + return nil, err + } + codecConfig.MemoryVersion, err = s.ResourceEncodingConfig.InMemoryEncodingFor(groupResource) + if err != nil { + return nil, err + } + codecConfig.Config = storageConfig + + storageConfig.Codec, storageConfig.EncodeVersioner, err = s.newStorageCodecFn(codecConfig) + if err != nil { + return nil, err + } + klog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config) + + return &storageConfig, nil +} + +// Backends returns all backends for all registered storage destinations. +// Used for getting all instances for health validations. +func (s *DefaultStorageFactory) Backends() []Backend { + servers := sets.NewString(s.StorageConfig.Transport.ServerList...) + + for _, overrides := range s.Overrides { + servers.Insert(overrides.etcdLocation...) + } + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + } + if len(s.StorageConfig.Transport.CertFile) > 0 && len(s.StorageConfig.Transport.KeyFile) > 0 { + cert, err := tls.LoadX509KeyPair(s.StorageConfig.Transport.CertFile, s.StorageConfig.Transport.KeyFile) + if err != nil { + klog.Errorf("failed to load key pair while getting backends: %s", err) + } else { + tlsConfig.Certificates = []tls.Certificate{cert} + } + } + if len(s.StorageConfig.Transport.TrustedCAFile) > 0 { + if caCert, err := ioutil.ReadFile(s.StorageConfig.Transport.TrustedCAFile); err != nil { + klog.Errorf("failed to read ca file while getting backends: %s", err) + } else { + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caPool + tlsConfig.InsecureSkipVerify = false + } + } + + backends := []Backend{} + for server := range servers { + backends = append(backends, Backend{ + Server: server, + // We can't share TLSConfig across different backends to avoid races. 
+ // For more details see: http://pr.k8s.io/59338 + TLSConfig: tlsConfig.Clone(), + }) + } + return backends +} + +func (s *DefaultStorageFactory) ResourcePrefix(groupResource schema.GroupResource) string { + chosenStorageResource := s.getStorageGroupResource(groupResource) + groupOverride := s.Overrides[getAllResourcesAlias(chosenStorageResource)] + exactResourceOverride := s.Overrides[chosenStorageResource] + + etcdResourcePrefix := s.DefaultResourcePrefixes[chosenStorageResource] + if len(groupOverride.etcdResourcePrefix) > 0 { + etcdResourcePrefix = groupOverride.etcdResourcePrefix + } + if len(exactResourceOverride.etcdResourcePrefix) > 0 { + etcdResourcePrefix = exactResourceOverride.etcdResourcePrefix + } + if len(etcdResourcePrefix) == 0 { + etcdResourcePrefix = strings.ToLower(chosenStorageResource.Resource) + } + + return etcdResourcePrefix +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/OWNERS new file mode 100644 index 0000000000..167792c327 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/OWNERS @@ -0,0 +1,26 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- liggitt +- timothysc +- wojtek-t +- xiang90 +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- mikedanese +- liggitt +- ncdc +- timothysc +- hongchaodeng +- krousey +- xiang90 +- mml +- ingvagabund +- resouer +- mbohlool +- enj diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go new file mode 100644 index 0000000000..4595177675 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -0,0 +1,1441 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "context" + "fmt" + "net/http" + "reflect" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +var ( + emptyFunc = func() {} +) + +const ( + // storageWatchListPageSize is the cacher's request chunk size of + // initial and resync watch lists to storage. + storageWatchListPageSize = int64(10000) + // defaultBookmarkFrequency defines how frequently watch bookmarks should be send + // in addition to sending a bookmark right before watch deadline. + // + // NOTE: Update `eventFreshDuration` when changing this value. + defaultBookmarkFrequency = time.Minute +) + +// Config contains the configuration for a given Cache. 
+type Config struct { + // An underlying storage.Interface. + Storage storage.Interface + + // An underlying storage.Versioner. + Versioner storage.Versioner + + // The Cache will be caching objects of a given Type and assumes that they + // are all stored under ResourcePrefix directory in the underlying database. + ResourcePrefix string + + // KeyFunc is used to get a key in the underlying storage for a given object. + KeyFunc func(runtime.Object) (string, error) + + // GetAttrsFunc is used to get object labels, fields + GetAttrsFunc func(runtime.Object) (label labels.Set, field fields.Set, err error) + + // IndexerFuncs is used for optimizing amount of watchers that + // needs to process an incoming event. + IndexerFuncs storage.IndexerFuncs + + // Indexers is used to accelerate the list operation, falls back to regular list + // operation if no indexer found. + Indexers *cache.Indexers + + // NewFunc is a function that creates new empty object storing a object of type Type. + NewFunc func() runtime.Object + + // NewList is a function that creates new empty object storing a list of + // objects of type Type. + NewListFunc func() runtime.Object + + Codec runtime.Codec + + Clock clock.Clock +} + +type watchersMap map[int]*cacheWatcher + +func (wm watchersMap) addWatcher(w *cacheWatcher, number int) { + wm[number] = w +} + +func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) { + if watcher, ok := wm[number]; ok { + delete(wm, number) + done(watcher) + } +} + +func (wm watchersMap) terminateAll(done func(*cacheWatcher)) { + for key, watcher := range wm { + delete(wm, key) + done(watcher) + } +} + +type indexedWatchers struct { + allWatchers watchersMap + valueWatchers map[string]watchersMap +} + +func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, value string, supported bool) { + if supported { + if _, ok := i.valueWatchers[value]; !ok { + i.valueWatchers[value] = watchersMap{} + } + i.valueWatchers[value].addWatcher(w, number) + } else { + i.allWatchers.addWatcher(w, number) + } +} + +func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool, done func(*cacheWatcher)) { + if supported { + i.valueWatchers[value].deleteWatcher(number, done) + if len(i.valueWatchers[value]) == 0 { + delete(i.valueWatchers, value) + } + } else { + i.allWatchers.deleteWatcher(number, done) + } +} + +func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cacheWatcher)) { + if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 { + klog.Warningf("Terminating all watchers from cacher %v", objectType) + } + i.allWatchers.terminateAll(done) + for _, watchers := range i.valueWatchers { + watchers.terminateAll(done) + } + i.valueWatchers = map[string]watchersMap{} +} + +// As we don't need a high precision here, we keep all watchers timeout within a +// second in a bucket, and pop up them once at the timeout. To be more specific, +// if you set fire time at X, you can get the bookmark within (X-1,X+1) period. 
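The bucketing scheme described in the comment above trades precision for cheap bookkeeping: bookmark deadlines are grouped per second elapsed since the cacher was created. A standalone sketch of the arithmetic (the numbers are chosen only for illustration):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	createTime := time.Now()
	nextBookmark := createTime.Add(90500 * time.Millisecond) // due ~90.5s after start

	// A watcher whose next bookmark is due at time X lands in bucket
	// floor(X - createTime), so it is popped within the (X-1s, X+1s) window.
	bucketID := int64(nextBookmark.Sub(createTime) / time.Second)
	fmt.Println("bucket:", bucketID) // 90

	// popExpiredWatchers walks every bucket up to the current one.
	now := createTime.Add(91 * time.Second)
	currentBucketID := int64(now.Sub(createTime) / time.Second)
	fmt.Println("popped:", currentBucketID >= bucketID) // true
}
```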
+type watcherBookmarkTimeBuckets struct { + lock sync.Mutex + // the key of watcherBuckets is the number of seconds since createTime + watchersBuckets map[int64][]*cacheWatcher + createTime time.Time + startBucketID int64 + clock clock.Clock + bookmarkFrequency time.Duration +} + +func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) *watcherBookmarkTimeBuckets { + return &watcherBookmarkTimeBuckets{ + watchersBuckets: make(map[int64][]*cacheWatcher), + createTime: clock.Now(), + startBucketID: 0, + clock: clock, + bookmarkFrequency: bookmarkFrequency, + } +} + +// adds a watcher to the bucket, if the deadline is before the start, it will be +// added to the first one. +func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool { + nextTime, ok := w.nextBookmarkTime(t.clock.Now(), t.bookmarkFrequency) + if !ok { + return false + } + bucketID := int64(nextTime.Sub(t.createTime) / time.Second) + t.lock.Lock() + defer t.lock.Unlock() + if bucketID < t.startBucketID { + bucketID = t.startBucketID + } + watchers, _ := t.watchersBuckets[bucketID] + t.watchersBuckets[bucketID] = append(watchers, w) + return true +} + +func (t *watcherBookmarkTimeBuckets) popExpiredWatchers() [][]*cacheWatcher { + currentBucketID := int64(t.clock.Since(t.createTime) / time.Second) + // There should be one or two elements in almost all cases + expiredWatchers := make([][]*cacheWatcher, 0, 2) + t.lock.Lock() + defer t.lock.Unlock() + for ; t.startBucketID <= currentBucketID; t.startBucketID++ { + if watchers, ok := t.watchersBuckets[t.startBucketID]; ok { + delete(t.watchersBuckets, t.startBucketID) + expiredWatchers = append(expiredWatchers, watchers) + } + } + return expiredWatchers +} + +type filterWithAttrsFunc func(key string, l labels.Set, f fields.Set) bool + +type indexedTriggerFunc struct { + indexName string + indexerFunc storage.IndexerFunc +} + +// Cacher is responsible for serving WATCH and LIST requests for a given +// resource from its internal cache and updating its cache in the background +// based on the underlying storage contents. +// Cacher implements storage.Interface (although most of the calls are just +// delegated to the underlying storage). +type Cacher struct { + // HighWaterMarks for performance debugging. + // Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms + // See: https://golang.org/pkg/sync/atomic/ for more information + incomingHWM storage.HighWaterMark + // Incoming events that should be dispatched to watchers. + incoming chan watchCacheEvent + + sync.RWMutex + + // Before accessing the cacher's cache, wait for the ready to be ok. + // This is necessary to prevent users from accessing structures that are + // uninitialized or are being repopulated right now. + // ready needs to be set to false when the cacher is paused or stopped. + // ready needs to be set to true when the cacher is ready to use after + // initialization. + ready *ready + + // Underlying storage.Interface. + storage storage.Interface + + // Expected type of objects in the underlying cache. + objectType reflect.Type + + // "sliding window" of recent changes of objects and the current state. + watchCache *watchCache + reflector *cache.Reflector + + // Versioner is used to handle resource versions. + versioner storage.Versioner + + // newFunc is a function that creates new empty object storing a object of type Type. 
+ newFunc func() runtime.Object + + // indexedTrigger is used for optimizing amount of watchers that needs to process + // an incoming event. + indexedTrigger *indexedTriggerFunc + // watchers is mapping from the value of trigger function that a + // watcher is interested into the watchers + watcherIdx int + watchers indexedWatchers + + // Defines a time budget that can be spend on waiting for not-ready watchers + // while dispatching event before shutting them down. + dispatchTimeoutBudget timeBudget + + // Handling graceful termination. + stopLock sync.RWMutex + stopped bool + stopCh chan struct{} + stopWg sync.WaitGroup + + clock clock.Clock + // timer is used to avoid unnecessary allocations in underlying watchers. + timer *time.Timer + + // dispatching determines whether there is currently dispatching of + // any event in flight. + dispatching bool + // watchersBuffer is a list of watchers potentially interested in currently + // dispatched event. + watchersBuffer []*cacheWatcher + // blockedWatchers is a list of watchers whose buffer is currently full. + blockedWatchers []*cacheWatcher + // watchersToStop is a list of watchers that were supposed to be stopped + // during current dispatching, but stopping was deferred to the end of + // dispatching that event to avoid race with closing channels in watchers. + watchersToStop []*cacheWatcher + // Maintain a timeout queue to send the bookmark event before the watcher times out. + bookmarkWatchers *watcherBookmarkTimeBuckets +} + +// NewCacherFromConfig creates a new Cacher responsible for servicing WATCH and LIST requests from +// its internal cache and updating its cache in the background based on the +// given configuration. +func NewCacherFromConfig(config Config) (*Cacher, error) { + stopCh := make(chan struct{}) + obj := config.NewFunc() + // Give this error when it is constructed rather than when you get the + // first watch item, because it's much easier to track down that way. + if err := runtime.CheckCodec(config.Codec, obj); err != nil { + return nil, fmt.Errorf("storage codec doesn't seem to match given type: %v", err) + } + + var indexedTrigger *indexedTriggerFunc + if config.IndexerFuncs != nil { + // For now, we don't support multiple trigger functions defined + // for a given resource. + if len(config.IndexerFuncs) > 1 { + return nil, fmt.Errorf("cacher %s doesn't support more than one IndexerFunc: ", reflect.TypeOf(obj).String()) + } + for key, value := range config.IndexerFuncs { + if value != nil { + indexedTrigger = &indexedTriggerFunc{ + indexName: key, + indexerFunc: value, + } + } + } + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + objType := reflect.TypeOf(obj) + cacher := &Cacher{ + ready: newReady(), + storage: config.Storage, + objectType: objType, + versioner: config.Versioner, + newFunc: config.NewFunc, + indexedTrigger: indexedTrigger, + watcherIdx: 0, + watchers: indexedWatchers{ + allWatchers: make(map[int]*cacheWatcher), + valueWatchers: make(map[string]watchersMap), + }, + // TODO: Figure out the correct value for the buffer size. + incoming: make(chan watchCacheEvent, 100), + dispatchTimeoutBudget: newTimeBudget(stopCh), + // We need to (potentially) stop both: + // - wait.Until go-routine + // - reflector.ListAndWatch + // and there are no guarantees on the order that they will stop. + // So we will be simply closing the channel, and synchronizing on the WaitGroup. 
+ stopCh: stopCh, + clock: config.Clock, + timer: time.NewTimer(time.Duration(0)), + bookmarkWatchers: newTimeBucketWatchers(config.Clock, defaultBookmarkFrequency), + } + + // Ensure that timer is stopped. + if !cacher.timer.Stop() { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. + <-cacher.timer.C + } + + watchCache := newWatchCache( + config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, objType) + listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + reflectorName := "storage/cacher.go:" + config.ResourcePrefix + + reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) + // Configure reflector's pager to for an appropriate pagination chunk size for fetching data from + // storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error. + reflector.WatchListPageSize = storageWatchListPageSize + + cacher.watchCache = watchCache + cacher.reflector = reflector + + go cacher.dispatchEvents() + + cacher.stopWg.Add(1) + go func() { + defer cacher.stopWg.Done() + defer cacher.terminateAllWatchers() + wait.Until( + func() { + if !cacher.isStopped() { + cacher.startCaching(stopCh) + } + }, time.Second, stopCh, + ) + }() + + return cacher, nil +} + +func (c *Cacher) startCaching(stopChannel <-chan struct{}) { + // The 'usable' lock is always 'RLock'able when it is safe to use the cache. + // It is safe to use the cache after a successful list until a disconnection. + // We start with usable (write) locked. The below OnReplace function will + // unlock it after a successful list. The below defer will then re-lock + // it when this function exits (always due to disconnection), only if + // we actually got a successful list. This cycle will repeat as needed. + successfulList := false + c.watchCache.SetOnReplace(func() { + successfulList = true + c.ready.set(true) + klog.V(1).Infof("cacher (%v): initialized", c.objectType.String()) + }) + defer func() { + if successfulList { + c.ready.set(false) + } + }() + + c.terminateAllWatchers() + // Note that since onReplace may be not called due to errors, we explicitly + // need to retry it on errors under lock. + // Also note that startCaching is called in a loop, so there's no need + // to have another loop here. + if err := c.reflector.ListAndWatch(stopChannel); err != nil { + klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err) + } +} + +// Versioner implements storage.Interface. +func (c *Cacher) Versioner() storage.Versioner { + return c.storage.Versioner() +} + +// Create implements storage.Interface. +func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + return c.storage.Create(ctx, key, obj, out, ttl) +} + +// Delete implements storage.Interface. +func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc) error { + return c.storage.Delete(ctx, key, out, preconditions, validateDeletion) +} + +// Watch implements storage.Interface. 
+func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + pred := opts.Predicate + watchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return nil, err + } + + c.ready.wait() + + triggerValue, triggerSupported := "", false + if c.indexedTrigger != nil { + for _, field := range pred.IndexFields { + if field == c.indexedTrigger.indexName { + if value, ok := pred.Field.RequiresExactMatch(field); ok { + triggerValue, triggerSupported = value, true + } + } + } + } + + // If there is indexedTrigger defined, but triggerSupported is false, + // we can't narrow the amount of events significantly at this point. + // + // That said, currently indexedTrigger is defined only for couple resources: + // Pods, Nodes, Secrets and ConfigMaps and there is only a constant + // number of watchers for which triggerSupported is false (excluding those + // issued explicitly by users). + // Thus, to reduce the risk of those watchers blocking all watchers of a + // given resource in the system, we increase the sizes of buffers for them. + chanSize := 10 + if c.indexedTrigger != nil && !triggerSupported { + // TODO: We should tune this value and ideally make it dependent on the + // number of objects of a given type and/or their churn. + chanSize = 1000 + } + + // Determine watch timeout('0' means deadline is not set, ignore checking) + deadline, _ := ctx.Deadline() + // Create a watcher here to reduce memory allocations under lock, + // given that memory allocation may trigger GC and block the thread. + // Also note that emptyFunc is a placeholder, until we will be able + // to compute watcher.forget function (which has to happen under lock). + watcher := newCacheWatcher(chanSize, filterWithAttrsFunction(key, pred), emptyFunc, c.versioner, deadline, pred.AllowWatchBookmarks, c.objectType) + + // We explicitly use thread unsafe version and do locking ourself to ensure that + // no new events will be processed in the meantime. The watchCache will be unlocked + // on return from this function. + // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the + // underlying watchCache is calling processEvent under its lock. + c.watchCache.RLock() + defer c.watchCache.RUnlock() + initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) + if err != nil { + // To match the uncached watch implementation, once we have passed authn/authz/admission, + // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, + // rather than a directly returned error. + return newErrWatcher(err), nil + } + + // With some events already sent, update resourceVersion so that + // events that were buffered and not yet processed won't be delivered + // to this watcher second time causing going back in time. + if len(initEvents) > 0 { + watchRV = initEvents[len(initEvents)-1].ResourceVersion + } + + func() { + c.Lock() + defer c.Unlock() + // Update watcher.forget function once we can compute it. + watcher.forget = forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported) + c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported) + + // Add it to the queue only when the client support watch bookmarks. + if watcher.allowWatchBookmarks { + c.bookmarkWatchers.addWatcher(watcher) + } + c.watcherIdx++ + }() + + go watcher.process(ctx, initEvents, watchRV) + return watcher, nil +} + +// WatchList implements storage.Interface. 
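The trigger narrowing in Watch above only helps when the predicate's field selector pins the indexed field to an exact value; otherwise the watcher has to be offered every event. A small standalone sketch of that selector behaviour (spec.nodeName is just an example of an indexed field):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// Exact match on the indexed field: the cacher can route the watch into the
	// per-value watcher map instead of the "all watchers" list.
	sel := fields.OneTermEqualSelector("spec.nodeName", "node-1")
	if value, ok := sel.RequiresExactMatch("spec.nodeName"); ok {
		fmt.Println("trigger value:", value) // node-1
	}

	// A selector that does not pin the indexed field yields no trigger value,
	// so every watcher must see the event.
	if _, ok := fields.Everything().RequiresExactMatch("spec.nodeName"); !ok {
		fmt.Println("no trigger value; falling back to all watchers")
	}
}
```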
+func (c *Cacher) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return c.Watch(ctx, key, opts) +} + +// Get implements storage.Interface. +func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + if opts.ResourceVersion == "" { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). + return c.storage.Get(ctx, key, opts, objPtr) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + getRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return err + } + + if getRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.Get(ctx, key, opts, objPtr) + } + + // Do not create a trace - it's not for free and there are tons + // of Get requests. We can add it if it will be really needed. + c.ready.wait() + + objVal, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + + obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(getRV, key, nil) + if err != nil { + return err + } + + if exists { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + objVal.Set(reflect.ValueOf(elem.Object).Elem()) + } else { + objVal.Set(reflect.Zero(objVal.Type())) + if !opts.IgnoreNotFound { + return storage.NewKeyNotFoundError(key, int64(readResourceVersion)) + } + } + return nil +} + +// GetToList implements storage.Interface. +func (c *Cacher) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + pred := opts.Predicate + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). If a continuation is + // requested, serve it from the underlying storage as well. + // Limits are only sent to storage when resourceVersion is non-zero + // since the watch cache isn't able to perform continuations, and + // limits are ignored when resource version is zero + return c.storage.GetToList(ctx, key, opts, listObj) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + listRV, err := c.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + if listRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.GetToList(ctx, key, opts, listObj) + } + + trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()}) + defer trace.LogIfLong(500 * time.Millisecond) + + c.ready.wait() + trace.Step("Ready") + + // List elements with at least 'listRV' from cache. 
+ listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + filter := filterWithAttrsFunction(key, pred) + + obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(listRV, key, trace) + if err != nil { + return err + } + trace.Step("Got from cache") + + if exists { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + if filter(elem.Key, elem.Labels, elem.Fields) { + listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) + } + } + if c.versioner != nil { + if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { + return err + } + } + return nil +} + +// List implements storage.Interface. +func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + pred := opts.Predicate + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). If a continuation is + // requested, serve it from the underlying storage as well. + // Limits are only sent to storage when resourceVersion is non-zero + // since the watch cache isn't able to perform continuations, and + // limits are ignored when resource version is zero. + return c.storage.List(ctx, key, opts, listObj) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + listRV, err := c.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + if listRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.List(ctx, key, opts, listObj) + } + + trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()}) + defer trace.LogIfLong(500 * time.Millisecond) + + c.ready.wait() + trace.Step("Ready") + + // List elements with at least 'listRV' from cache. + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + filter := filterWithAttrsFunction(key, pred) + + objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace) + if err != nil { + return err + } + trace.Step("Listed items from cache", utiltrace.Field{"count", len(objs)}) + if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() { + // Resize the slice appropriately, since we already know that none + // of the elements will be filtered out. 
+ listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs))) + trace.Step("Resized result") + } + for _, obj := range objs { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + if filter(elem.Key, elem.Labels, elem.Fields) { + listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) + } + } + trace.Step("Filtered items", utiltrace.Field{"count", listVal.Len()}) + if c.versioner != nil { + if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { + return err + } + } + return nil +} + +// GuaranteedUpdate implements storage.Interface. +func (c *Cacher) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ runtime.Object) error { + // Ignore the suggestion and try to pass down the current version of the object + // read from cache. + if elem, exists, err := c.watchCache.GetByKey(key); err != nil { + klog.Errorf("GetByKey returned error: %v", err) + } else if exists { + currObj := elem.(*storeElement).Object.DeepCopyObject() + return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj) + } + // If we couldn't get the object, fallback to no-suggestion. + return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil) +} + +// Count implements storage.Interface. +func (c *Cacher) Count(pathPrefix string) (int64, error) { + return c.storage.Count(pathPrefix) +} + +// baseObjectThreadUnsafe omits locking for cachingObject. +func baseObjectThreadUnsafe(object runtime.Object) runtime.Object { + if co, ok := object.(*cachingObject); ok { + return co.object + } + return object +} + +func (c *Cacher) triggerValuesThreadUnsafe(event *watchCacheEvent) ([]string, bool) { + if c.indexedTrigger == nil { + return nil, false + } + + result := make([]string, 0, 2) + result = append(result, c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.Object))) + if event.PrevObject == nil { + return result, true + } + prevTriggerValue := c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.PrevObject)) + if result[0] != prevTriggerValue { + result = append(result, prevTriggerValue) + } + return result, true +} + +func (c *Cacher) processEvent(event *watchCacheEvent) { + if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) { + // Monitor if this gets backed up, and how much. + klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) + } + c.incoming <- *event +} + +func (c *Cacher) dispatchEvents() { + // Jitter to help level out any aggregate load. + bookmarkTimer := c.clock.NewTimer(wait.Jitter(time.Second, 0.25)) + defer bookmarkTimer.Stop() + + lastProcessedResourceVersion := uint64(0) + for { + select { + case event, ok := <-c.incoming: + if !ok { + return + } + // Don't dispatch bookmarks coming from the storage layer. + // They can be very frequent (even to the level of subseconds) + // to allow efficient watch resumption on kube-apiserver restarts, + // and propagating them down may overload the whole system. + // + // TODO: If at some point we decide the performance and scalability + // footprint is acceptable, this is the place to hook them in. 
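processEvent above uses storage.HighWaterMark so that the incoming-queue depth is logged only when it reaches a new maximum, not on every enqueue. A tiny sketch of that helper as it is used here:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage"
)

func main() {
	// Update records the largest value seen so far and reports whether the new
	// sample set a record; that is when the cacher emits its queue-depth log line.
	var hwm storage.HighWaterMark
	fmt.Println(hwm.Update(10)) // true  (new high water mark)
	fmt.Println(hwm.Update(5))  // false (below the recorded maximum)
	fmt.Println(hwm.Update(25)) // true
}
```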
+ // However, we then need to check if this was called as a result + // of a bookmark event or regular Add/Update/Delete operation by + // checking if resourceVersion here has changed. + if event.Type != watch.Bookmark { + c.dispatchEvent(&event) + } + lastProcessedResourceVersion = event.ResourceVersion + case <-bookmarkTimer.C(): + bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25)) + // Never send a bookmark event if we did not see an event here, this is fine + // because we don't provide any guarantees on sending bookmarks. + if lastProcessedResourceVersion == 0 { + // pop expired watchers in case there has been no update + c.bookmarkWatchers.popExpiredWatchers() + continue + } + bookmarkEvent := &watchCacheEvent{ + Type: watch.Bookmark, + Object: c.newFunc(), + ResourceVersion: lastProcessedResourceVersion, + } + if err := c.versioner.UpdateObject(bookmarkEvent.Object, bookmarkEvent.ResourceVersion); err != nil { + klog.Errorf("failure to set resourceVersion to %d on bookmark event %+v", bookmarkEvent.ResourceVersion, bookmarkEvent.Object) + continue + } + c.dispatchEvent(bookmarkEvent) + case <-c.stopCh: + return + } + } +} + +func setCachingObjects(event *watchCacheEvent, versioner storage.Versioner) { + switch event.Type { + case watch.Added, watch.Modified: + if object, err := newCachingObject(event.Object); err == nil { + event.Object = object + } else { + klog.Errorf("couldn't create cachingObject from: %#v", event.Object) + } + // Don't wrap PrevObject for update event (for create events it is nil). + // We only encode those to deliver DELETE watch events, so if + // event.Object is not nil it can be used only for watchers for which + // selector was satisfied for its previous version and is no longer + // satisfied for the current version. + // This is rare enough that it doesn't justify making deep-copy of the + // object (done by newCachingObject) every time. + case watch.Deleted: + // Don't wrap Object for delete events - these are not to deliver any + // events. Only wrap PrevObject. + if object, err := newCachingObject(event.PrevObject); err == nil { + // Update resource version of the underlying object. + // event.PrevObject is used to deliver DELETE watch events and + // for them, we set resourceVersion to instead of + // the resourceVersion of the last modification of the object. + updateResourceVersionIfNeeded(object.object, versioner, event.ResourceVersion) + event.PrevObject = object + } else { + klog.Errorf("couldn't create cachingObject from: %#v", event.Object) + } + } +} + +func (c *Cacher) dispatchEvent(event *watchCacheEvent) { + c.startDispatching(event) + defer c.finishDispatching() + // Watchers stopped after startDispatching will be delayed to finishDispatching, + + // Since add() can block, we explicitly add when cacher is unlocked. + // Dispatching event in nonblocking way first, which make faster watchers + // not be blocked by slower ones. + if event.Type == watch.Bookmark { + for _, watcher := range c.watchersBuffer { + watcher.nonblockingAdd(event) + } + } else { + // Set up caching of object serializations only for dispatching this event. + // + // Storing serializations in memory would result in increased memory usage, + // but it would help for caching encodings for watches started from old + // versions. However, we still don't have a convincing data that the gain + // from it justifies increased memory usage, so for now we drop the cached + // serializations after dispatching this event. 
+ // + // Given the deep-copies that are done to create cachingObjects, + // we try to cache serializations only if there are at least 3 watchers. + if len(c.watchersBuffer) >= 3 { + // Make a shallow copy to allow overwriting Object and PrevObject. + wcEvent := *event + setCachingObjects(&wcEvent, c.versioner) + event = &wcEvent + } + + c.blockedWatchers = c.blockedWatchers[:0] + for _, watcher := range c.watchersBuffer { + if !watcher.nonblockingAdd(event) { + c.blockedWatchers = append(c.blockedWatchers, watcher) + } + } + + if len(c.blockedWatchers) > 0 { + // dispatchEvent is called very often, so arrange + // to reuse timers instead of constantly allocating. + startTime := time.Now() + timeout := c.dispatchTimeoutBudget.takeAvailable() + c.timer.Reset(timeout) + + // Make sure every watcher will try to send event without blocking first, + // even if the timer has already expired. + timer := c.timer + for _, watcher := range c.blockedWatchers { + if !watcher.add(event, timer) { + // fired, clean the timer by set it to nil. + timer = nil + } + } + + // Stop the timer if it is not fired + if timer != nil && !timer.Stop() { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. + <-timer.C + } + + c.dispatchTimeoutBudget.returnUnused(timeout - time.Since(startTime)) + } + } +} + +func (c *Cacher) startDispatchingBookmarkEvents() { + // Pop already expired watchers. However, explicitly ignore stopped ones, + // as we don't delete watcher from bookmarkWatchers when it is stopped. + for _, watchers := range c.bookmarkWatchers.popExpiredWatchers() { + for _, watcher := range watchers { + // c.Lock() is held here. + // watcher.stopThreadUnsafe() is protected by c.Lock() + if watcher.stopped { + continue + } + c.watchersBuffer = append(c.watchersBuffer, watcher) + // Requeue the watcher for the next bookmark if needed. + c.bookmarkWatchers.addWatcher(watcher) + } + } +} + +// startDispatching chooses watchers potentially interested in a given event +// a marks dispatching as true. +func (c *Cacher) startDispatching(event *watchCacheEvent) { + // It is safe to call triggerValuesThreadUnsafe here, because at this + // point only this thread can access this event (we create a separate + // watchCacheEvent for every dispatch). + triggerValues, supported := c.triggerValuesThreadUnsafe(event) + + c.Lock() + defer c.Unlock() + + c.dispatching = true + // We are reusing the slice to avoid memory reallocations in every + // dispatchEvent() call. That may prevent Go GC from freeing items + // from previous phases that are sitting behind the current length + // of the slice, but there is only a limited number of those and the + // gain from avoiding memory allocations is much bigger. + c.watchersBuffer = c.watchersBuffer[:0] + + if event.Type == watch.Bookmark { + c.startDispatchingBookmarkEvents() + // return here to reduce following code indentation and diff + return + } + + // Iterate over "allWatchers" no matter what the trigger function is. + for _, watcher := range c.watchers.allWatchers { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + if supported { + // Iterate over watchers interested in the given values of the trigger. + for _, triggerValue := range triggerValues { + for _, watcher := range c.watchers.valueWatchers[triggerValue] { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + } + } else { + // supported equal to false generally means that trigger function + // is not defined (or not aware of any indexes). 
In this case, + // watchers filters should generally also don't generate any + // trigger values, but can cause problems in case of some + // misconfiguration. Thus we paranoidly leave this branch. + + // Iterate over watchers interested in exact values for all values. + for _, watchers := range c.watchers.valueWatchers { + for _, watcher := range watchers { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + } + } +} + +// finishDispatching stops all the watchers that were supposed to be +// stopped in the meantime, but it was deferred to avoid closing input +// channels of watchers, as add() may still have writing to it. +// It also marks dispatching as false. +func (c *Cacher) finishDispatching() { + c.Lock() + defer c.Unlock() + c.dispatching = false + for _, watcher := range c.watchersToStop { + watcher.stopThreadUnsafe() + } + c.watchersToStop = c.watchersToStop[:0] +} + +func (c *Cacher) terminateAllWatchers() { + c.Lock() + defer c.Unlock() + c.watchers.terminateAll(c.objectType, c.stopWatcherThreadUnsafe) +} + +func (c *Cacher) stopWatcherThreadUnsafe(watcher *cacheWatcher) { + if c.dispatching { + c.watchersToStop = append(c.watchersToStop, watcher) + } else { + watcher.stopThreadUnsafe() + } +} + +func (c *Cacher) isStopped() bool { + c.stopLock.RLock() + defer c.stopLock.RUnlock() + return c.stopped +} + +// Stop implements the graceful termination. +func (c *Cacher) Stop() { + c.stopLock.Lock() + if c.stopped { + // avoid stopping twice (note: cachers are shared with subresources) + c.stopLock.Unlock() + return + } + c.stopped = true + c.stopLock.Unlock() + close(c.stopCh) + c.stopWg.Wait() +} + +func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func() { + return func() { + c.Lock() + defer c.Unlock() + + // It's possible that the watcher is already not in the structure (e.g. in case of + // simultaneous Stop() and terminateAllWatchers(), but it is safe to call stopThreadUnsafe() + // on a watcher multiple times. + c.watchers.deleteWatcher(index, triggerValue, triggerSupported, c.stopWatcherThreadUnsafe) + } +} + +func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc { + filterFunc := func(objKey string, label labels.Set, field fields.Set) bool { + if !hasPathPrefix(objKey, key) { + return false + } + return p.MatchesObjectAttributes(label, field) + } + return filterFunc +} + +// LastSyncResourceVersion returns resource version to which the underlying cache is synced. +func (c *Cacher) LastSyncResourceVersion() (uint64, error) { + c.ready.wait() + + resourceVersion := c.reflector.LastSyncResourceVersion() + return c.versioner.ParseResourceVersion(resourceVersion) +} + +// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher. +type cacherListerWatcher struct { + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object +} + +// NewCacherListerWatcher returns a storage.Interface backed ListerWatcher. +func NewCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { + return &cacherListerWatcher{ + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + } +} + +// Implements cache.ListerWatcher interface. 
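dispatchEvent above first offers each event to every watcher without blocking, and only retries the stragglers against a single shared timer funded by the dispatch time budget. A self-contained sketch of that two-phase send pattern (the channel sizes and the timeout are illustrative, not the cacher's real values):

```go
package main

import (
	"fmt"
	"time"
)

// trySend mirrors the nonblockingAdd/add pair: attempt a non-blocking send
// first, then block only until the shared timer fires.
func trySend(ch chan int, v int, timer *time.Timer) bool {
	select {
	case ch <- v:
		return true
	default:
	}
	if timer == nil {
		return false // budget already spent; give up immediately
	}
	select {
	case ch <- v:
		return true
	case <-timer.C:
		return false // slow consumer; the cacher would close such a watcher
	}
}

func main() {
	fast := make(chan int, 1)
	slow := make(chan int) // nobody reading: simulates a blocked watcher

	timer := time.NewTimer(20 * time.Millisecond)
	fmt.Println(trySend(fast, 1, timer)) // true, no budget consumed
	fmt.Println(trySend(slow, 1, timer)) // false once the timer fires
}
```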
+func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) { + list := lw.newListFunc() + pred := storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: options.Limit, + Continue: options.Continue, + } + + if err := lw.storage.List(context.TODO(), lw.resourcePrefix, storage.ListOptions{ResourceVersionMatch: options.ResourceVersionMatch, Predicate: pred}, list); err != nil { + return nil, err + } + return list, nil +} + +// Implements cache.ListerWatcher interface. +func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) { + opts := storage.ListOptions{ + ResourceVersion: options.ResourceVersion, + Predicate: storage.Everything, + } + if utilfeature.DefaultFeatureGate.Enabled(features.EfficientWatchResumption) { + opts.ProgressNotify = true + } + return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, opts) +} + +// errWatcher implements watch.Interface to return a single error +type errWatcher struct { + result chan watch.Event +} + +func newErrWatcher(err error) *errWatcher { + // Create an error event + errEvent := watch.Event{Type: watch.Error} + switch err := err.(type) { + case runtime.Object: + errEvent.Object = err + case *errors.StatusError: + errEvent.Object = &err.ErrStatus + default: + errEvent.Object = &metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + Reason: metav1.StatusReasonInternalError, + Code: http.StatusInternalServerError, + } + } + + // Create a watcher with room for a single event, populate it, and close the channel + watcher := &errWatcher{result: make(chan watch.Event, 1)} + watcher.result <- errEvent + close(watcher.result) + + return watcher +} + +// Implements watch.Interface. +func (c *errWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. +func (c *errWatcher) Stop() { + // no-op +} + +// cacheWatcher implements watch.Interface +// this is not thread-safe +type cacheWatcher struct { + input chan *watchCacheEvent + result chan watch.Event + done chan struct{} + filter filterWithAttrsFunc + stopped bool + forget func() + versioner storage.Versioner + // The watcher will be closed by server after the deadline, + // save it here to send bookmark events before that. + deadline time.Time + allowWatchBookmarks bool + // Object type of the cache watcher interests + objectType reflect.Type +} + +func newCacheWatcher(chanSize int, filter filterWithAttrsFunc, forget func(), versioner storage.Versioner, deadline time.Time, allowWatchBookmarks bool, objectType reflect.Type) *cacheWatcher { + return &cacheWatcher{ + input: make(chan *watchCacheEvent, chanSize), + result: make(chan watch.Event, chanSize), + done: make(chan struct{}), + filter: filter, + stopped: false, + forget: forget, + versioner: versioner, + deadline: deadline, + allowWatchBookmarks: allowWatchBookmarks, + objectType: objectType, + } +} + +// Implements watch.Interface. +func (c *cacheWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. 
+func (c *cacheWatcher) Stop() { + c.forget() +} + +// we rely on the fact that stopThredUnsafe is actually protected by Cacher.Lock() +func (c *cacheWatcher) stopThreadUnsafe() { + if !c.stopped { + c.stopped = true + close(c.done) + close(c.input) + } +} + +func (c *cacheWatcher) nonblockingAdd(event *watchCacheEvent) bool { + select { + case c.input <- event: + return true + default: + return false + } +} + +// Nil timer means that add will not block (if it can't send event immediately, it will break the watcher) +func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool { + // Try to send the event immediately, without blocking. + if c.nonblockingAdd(event) { + return true + } + + closeFunc := func() { + // This means that we couldn't send event to that watcher. + // Since we don't want to block on it infinitely, + // we simply terminate it. + klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String()) + c.forget() + } + + if timer == nil { + closeFunc() + return false + } + + // OK, block sending, but only until timer fires. + select { + case c.input <- event: + return true + case <-timer.C: + closeFunc() + return false + } +} + +func (c *cacheWatcher) nextBookmarkTime(now time.Time, bookmarkFrequency time.Duration) (time.Time, bool) { + // We try to send bookmarks: + // (a) roughly every minute + // (b) right before the watcher timeout - for now we simply set it 2s before + // the deadline + // The former gives us periodicity if the watch breaks due to unexpected + // conditions, the later ensures that on timeout the watcher is as close to + // now as possible - this covers 99% of cases. + heartbeatTime := now.Add(bookmarkFrequency) + if c.deadline.IsZero() { + // Timeout is set by our client libraries (e.g. reflector) as well as defaulted by + // apiserver if properly configured. So this shoudln't happen in practice. + return heartbeatTime, true + } + if pretimeoutTime := c.deadline.Add(-2 * time.Second); pretimeoutTime.Before(heartbeatTime) { + heartbeatTime = pretimeoutTime + } + + if heartbeatTime.Before(now) { + return time.Time{}, false + } + return heartbeatTime, true +} + +func getEventObject(object runtime.Object) runtime.Object { + if _, ok := object.(runtime.CacheableObject); ok { + // It is safe to return without deep-copy, because the underlying + // object was already deep-copied during construction. + return object + } + return object.DeepCopyObject() +} + +func updateResourceVersionIfNeeded(object runtime.Object, versioner storage.Versioner, resourceVersion uint64) { + if _, ok := object.(*cachingObject); ok { + // We assume that for cachingObject resourceVersion was already propagated before. + return + } + if err := versioner.UpdateObject(object, resourceVersion); err != nil { + utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", resourceVersion, object, err)) + } +} + +func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event { + if event.Type == watch.Bookmark { + return &watch.Event{Type: watch.Bookmark, Object: event.Object.DeepCopyObject()} + } + + curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.ObjLabels, event.ObjFields) + oldObjPasses := false + if event.PrevObject != nil { + oldObjPasses = c.filter(event.Key, event.PrevObjLabels, event.PrevObjFields) + } + if !curObjPasses && !oldObjPasses { + // Watcher is not interested in that object. 
+ return nil + } + + switch { + case curObjPasses && !oldObjPasses: + return &watch.Event{Type: watch.Added, Object: getEventObject(event.Object)} + case curObjPasses && oldObjPasses: + return &watch.Event{Type: watch.Modified, Object: getEventObject(event.Object)} + case !curObjPasses && oldObjPasses: + // return a delete event with the previous object content, but with the event's resource version + oldObj := getEventObject(event.PrevObject) + updateResourceVersionIfNeeded(oldObj, c.versioner, event.ResourceVersion) + return &watch.Event{Type: watch.Deleted, Object: oldObj} + } + + return nil +} + +// NOTE: sendWatchCacheEvent is assumed to not modify !!! +func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) { + watchEvent := c.convertToWatchEvent(event) + if watchEvent == nil { + // Watcher is not interested in that object. + return + } + + // We need to ensure that if we put event X to the c.result, all + // previous events were already put into it before, no matter whether + // c.done is close or not. + // Thus we cannot simply select from c.done and c.result and this + // would give us non-determinism. + // At the same time, we don't want to block infinitely on putting + // to c.result, when c.done is already closed. + + // This ensures that with c.done already close, we at most once go + // into the next select after this. With that, no matter which + // statement we choose there, we will deliver only consecutive + // events. + select { + case <-c.done: + return + default: + } + + select { + case c.result <- *watchEvent: + case <-c.done: + } +} + +func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEvent, resourceVersion uint64) { + defer utilruntime.HandleCrash() + + // Check how long we are processing initEvents. + // As long as these are not processed, we are not processing + // any incoming events, so if it takes long, we may actually + // block all watchers for some time. + // TODO: From the logs it seems that there happens processing + // times even up to 1s which is very long. However, this doesn't + // depend that much on the number of initEvents. E.g. from the + // 2000-node Kubemark run we have logs like this, e.g.: + // ... processing 13862 initEvents took 66.808689ms + // ... processing 14040 initEvents took 993.532539ms + // We should understand what is blocking us in those cases (e.g. + // is it lack of CPU, network, or sth else) and potentially + // consider increase size of result buffer in those cases. 
+ const initProcessThreshold = 500 * time.Millisecond + startTime := time.Now() + for _, event := range initEvents { + c.sendWatchCacheEvent(event) + } + objType := c.objectType.String() + if len(initEvents) > 0 { + initCounter.WithLabelValues(objType).Add(float64(len(initEvents))) + } + processingTime := time.Since(startTime) + if processingTime > initProcessThreshold { + klog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime) + } + + defer close(c.result) + defer c.Stop() + for { + select { + case event, ok := <-c.input: + if !ok { + return + } + // only send events newer than resourceVersion + if event.ResourceVersion > resourceVersion { + c.sendWatchCacheEvent(event) + } + case <-ctx.Done(): + return + } + } +} + +type ready struct { + ok bool + c *sync.Cond +} + +func newReady() *ready { + return &ready{c: sync.NewCond(&sync.RWMutex{})} +} + +func (r *ready) wait() { + r.c.L.Lock() + for !r.ok { + r.c.Wait() + } + r.c.L.Unlock() +} + +// TODO: Make check() function more sophisticated, in particular +// allow it to behave as "waitWithTimeout". +func (r *ready) check() bool { + rwMutex := r.c.L.(*sync.RWMutex) + rwMutex.RLock() + defer rwMutex.RUnlock() + return r.ok +} + +func (r *ready) set(ok bool) { + r.c.L.Lock() + defer r.c.L.Unlock() + r.ok = ok + r.c.Broadcast() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go new file mode 100644 index 0000000000..752a28714c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go @@ -0,0 +1,397 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime/debug" + "sync" + "sync/atomic" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" +) + +var _ runtime.CacheableObject = &cachingObject{} + +// metaRuntimeInterface implements runtime.Object and +// metav1.Object interfaces. +type metaRuntimeInterface interface { + runtime.Object + metav1.Object +} + +// serializationResult captures a result of serialization. +type serializationResult struct { + // once should be used to ensure serialization is computed once. + once sync.Once + + // raw is serialized object. + raw []byte + // err is error from serialization. + err error +} + +// serializationsCache is a type for caching serialization results. +type serializationsCache map[runtime.Identifier]*serializationResult + +// cachingObject is an object that is able to cache its serializations +// so that each of those is computed exactly once. +// +// cachingObject implements the metav1.Object interface (accessors for +// all metadata fields). However, setters for all fields except from +// SelfLink (which is set lately in the path) are ignored. 
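// A minimal, self-contained sketch (not part of the vendored file) of the
// copy-on-write map plus sync.Once pattern that cachingObject below builds on:
// readers take a lock-free fast path through atomic.Value, writers copy the
// map under a mutex, and each entry's value is computed exactly once.
// All identifiers here (onceCache, onceResult, get) are illustrative.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type onceResult struct {
	once sync.Once
	val  string
}

type onceCache struct {
	mu      sync.Mutex
	entries atomic.Value // always holds a map[string]*onceResult
}

func newOnceCache() *onceCache {
	c := &onceCache{}
	c.entries.Store(map[string]*onceResult{})
	return c
}

func (c *onceCache) entry(key string) *onceResult {
	// Fast path: lock-free read of the current map.
	if r, ok := c.entries.Load().(map[string]*onceResult)[key]; ok {
		return r
	}
	// Slow path: copy the map and insert the missing entry.
	c.mu.Lock()
	defer c.mu.Unlock()
	cur := c.entries.Load().(map[string]*onceResult)
	if r, ok := cur[key]; ok { // re-check after taking the lock
		return r
	}
	next := make(map[string]*onceResult, len(cur)+1)
	for k, v := range cur {
		next[k] = v
	}
	r := &onceResult{}
	next[key] = r
	c.entries.Store(next)
	return r
}

// get computes the value for key at most once, no matter how many callers race.
func (c *onceCache) get(key string, compute func() string) string {
	r := c.entry(key)
	r.once.Do(func() { r.val = compute() })
	return r.val
}

func main() {
	c := newOnceCache()
	fmt.Println(c.get("json", func() string { return "serialized-once" }))
	fmt.Println(c.get("json", func() string { return "never-called-again" }))
}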
+type cachingObject struct { + lock sync.RWMutex + + // Object for which serializations are cached. + object metaRuntimeInterface + + // serializations is a cache containing object`s serializations. + // The value stored in atomic.Value is of type serializationsCache. + // The atomic.Value type is used to allow fast-path. + serializations atomic.Value +} + +// newCachingObject performs a deep copy of the given object and wraps it +// into a cachingObject. +// An error is returned if it's not possible to cast the object to +// metav1.Object type. +func newCachingObject(object runtime.Object) (*cachingObject, error) { + if obj, ok := object.(metaRuntimeInterface); ok { + result := &cachingObject{object: obj.DeepCopyObject().(metaRuntimeInterface)} + result.serializations.Store(make(serializationsCache)) + return result, nil + } + return nil, fmt.Errorf("can't cast object to metav1.Object: %#v", object) +} + +func (o *cachingObject) getSerializationResult(id runtime.Identifier) *serializationResult { + // Fast-path for getting from cache. + serializations := o.serializations.Load().(serializationsCache) + if result, exists := serializations[id]; exists { + return result + } + + // Slow-path (that may require insert). + o.lock.Lock() + defer o.lock.Unlock() + + serializations = o.serializations.Load().(serializationsCache) + // Check if in the meantime it wasn't inserted. + if result, exists := serializations[id]; exists { + return result + } + + // Insert an entry for . This requires copy of existing map. + newSerializations := make(serializationsCache) + for k, v := range serializations { + newSerializations[k] = v + } + result := &serializationResult{} + newSerializations[id] = result + o.serializations.Store(newSerializations) + return result +} + +// CacheEncode implements runtime.CacheableObject interface. +// It serializes the object and writes the result to given io.Writer trying +// to first use the already cached result and falls back to a given encode +// function in case of cache miss. +// It assumes that for a given identifier, the encode function always encodes +// each input object into the same output format. +func (o *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.Object, io.Writer) error, w io.Writer) error { + result := o.getSerializationResult(id) + result.once.Do(func() { + buffer := bytes.NewBuffer(nil) + result.err = encode(o.GetObject(), buffer) + result.raw = buffer.Bytes() + }) + // Once invoked, fields of serialization will not change. + if result.err != nil { + return result.err + } + _, err := w.Write(result.raw) + return err +} + +// GetObject implements runtime.CacheableObject interface. +// It returns deep-copy of the wrapped object to return ownership of it +// to the called according to the contract of the interface. +func (o *cachingObject) GetObject() runtime.Object { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.DeepCopyObject().(metaRuntimeInterface) +} + +// GetObjectKind implements runtime.Object interface. +func (o *cachingObject) GetObjectKind() schema.ObjectKind { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetObjectKind() +} + +// DeepCopyObject implements runtime.Object interface. +func (o *cachingObject) DeepCopyObject() runtime.Object { + // DeepCopyObject on cachingObject is not expected to be called anywhere. + // However, to be on the safe-side, we implement it, though given the + // cache is only an optimization we ignore copying it. 
+ result := &cachingObject{} + result.serializations.Store(make(serializationsCache)) + + o.lock.RLock() + defer o.lock.RUnlock() + result.object = o.object.DeepCopyObject().(metaRuntimeInterface) + return result +} + +var ( + invalidationCacheTimestampLock sync.Mutex + invalidationCacheTimestamp time.Time +) + +// shouldLogCacheInvalidation allows for logging cache-invalidation +// at most once per second (to avoid spamming logs in case of issues). +func shouldLogCacheInvalidation(now time.Time) bool { + invalidationCacheTimestampLock.Lock() + defer invalidationCacheTimestampLock.Unlock() + if invalidationCacheTimestamp.Add(time.Second).Before(now) { + invalidationCacheTimestamp = now + return true + } + return false +} + +func (o *cachingObject) invalidateCacheLocked() { + if cache, ok := o.serializations.Load().(serializationsCache); ok && len(cache) == 0 { + return + } + // We don't expect cache invalidation to happen - so we want + // to log the stacktrace to allow debugging if that will happen. + // OTOH, we don't want to spam logs with it. + // So we try to log it at most once per second. + if shouldLogCacheInvalidation(time.Now()) { + klog.Warningf("Unexpected cache invalidation for %#v\n%s", o.object, string(debug.Stack())) + } + o.serializations.Store(make(serializationsCache)) +} + +// The following functions implement metav1.Object interface: +// - getters simply delegate for the underlying object +// - setters check if operations isn't noop and if so, +// invalidate the cache and delegate for the underlying object + +func (o *cachingObject) conditionalSet(isNoop func() bool, set func()) { + if fastPath := func() bool { + o.lock.RLock() + defer o.lock.RUnlock() + return isNoop() + }(); fastPath { + return + } + o.lock.Lock() + defer o.lock.Unlock() + if isNoop() { + return + } + o.invalidateCacheLocked() + set() +} + +func (o *cachingObject) GetNamespace() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetNamespace() +} +func (o *cachingObject) SetNamespace(namespace string) { + o.conditionalSet( + func() bool { return o.object.GetNamespace() == namespace }, + func() { o.object.SetNamespace(namespace) }, + ) +} +func (o *cachingObject) GetName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetName() +} +func (o *cachingObject) SetName(name string) { + o.conditionalSet( + func() bool { return o.object.GetName() == name }, + func() { o.object.SetName(name) }, + ) +} +func (o *cachingObject) GetGenerateName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetGenerateName() +} +func (o *cachingObject) SetGenerateName(name string) { + o.conditionalSet( + func() bool { return o.object.GetGenerateName() == name }, + func() { o.object.SetGenerateName(name) }, + ) +} +func (o *cachingObject) GetUID() types.UID { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetUID() +} +func (o *cachingObject) SetUID(uid types.UID) { + o.conditionalSet( + func() bool { return o.object.GetUID() == uid }, + func() { o.object.SetUID(uid) }, + ) +} +func (o *cachingObject) GetResourceVersion() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetResourceVersion() +} +func (o *cachingObject) SetResourceVersion(version string) { + o.conditionalSet( + func() bool { return o.object.GetResourceVersion() == version }, + func() { o.object.SetResourceVersion(version) }, + ) +} +func (o *cachingObject) GetGeneration() int64 { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetGeneration() +} +func (o 
*cachingObject) SetGeneration(generation int64) { + o.conditionalSet( + func() bool { return o.object.GetGeneration() == generation }, + func() { o.object.SetGeneration(generation) }, + ) +} +func (o *cachingObject) GetSelfLink() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetSelfLink() +} +func (o *cachingObject) SetSelfLink(selfLink string) { + o.conditionalSet( + func() bool { return o.object.GetSelfLink() == selfLink }, + func() { o.object.SetSelfLink(selfLink) }, + ) +} +func (o *cachingObject) GetCreationTimestamp() metav1.Time { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetCreationTimestamp() +} +func (o *cachingObject) SetCreationTimestamp(timestamp metav1.Time) { + o.conditionalSet( + func() bool { return o.object.GetCreationTimestamp() == timestamp }, + func() { o.object.SetCreationTimestamp(timestamp) }, + ) +} +func (o *cachingObject) GetDeletionTimestamp() *metav1.Time { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetDeletionTimestamp() +} +func (o *cachingObject) SetDeletionTimestamp(timestamp *metav1.Time) { + o.conditionalSet( + func() bool { return o.object.GetDeletionTimestamp() == timestamp }, + func() { o.object.SetDeletionTimestamp(timestamp) }, + ) +} +func (o *cachingObject) GetDeletionGracePeriodSeconds() *int64 { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetDeletionGracePeriodSeconds() +} +func (o *cachingObject) SetDeletionGracePeriodSeconds(gracePeriodSeconds *int64) { + o.conditionalSet( + func() bool { return o.object.GetDeletionGracePeriodSeconds() == gracePeriodSeconds }, + func() { o.object.SetDeletionGracePeriodSeconds(gracePeriodSeconds) }, + ) +} +func (o *cachingObject) GetLabels() map[string]string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetLabels() +} +func (o *cachingObject) SetLabels(labels map[string]string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetLabels(), labels) }, + func() { o.object.SetLabels(labels) }, + ) +} +func (o *cachingObject) GetAnnotations() map[string]string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetAnnotations() +} +func (o *cachingObject) SetAnnotations(annotations map[string]string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetAnnotations(), annotations) }, + func() { o.object.SetAnnotations(annotations) }, + ) +} +func (o *cachingObject) GetFinalizers() []string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetFinalizers() +} +func (o *cachingObject) SetFinalizers(finalizers []string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetFinalizers(), finalizers) }, + func() { o.object.SetFinalizers(finalizers) }, + ) +} +func (o *cachingObject) GetOwnerReferences() []metav1.OwnerReference { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetOwnerReferences() +} +func (o *cachingObject) SetOwnerReferences(references []metav1.OwnerReference) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetOwnerReferences(), references) }, + func() { o.object.SetOwnerReferences(references) }, + ) +} +func (o *cachingObject) GetClusterName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetClusterName() +} +func (o *cachingObject) SetClusterName(clusterName string) { + o.conditionalSet( + func() bool { return o.object.GetClusterName() == clusterName }, + func() { o.object.SetClusterName(clusterName) }, + ) +} +func (o *cachingObject) GetManagedFields() 
[]metav1.ManagedFieldsEntry { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetManagedFields() +} +func (o *cachingObject) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetManagedFields(), managedFields) }, + func() { o.object.SetManagedFields(managedFields) }, + ) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go new file mode 100644 index 0000000000..19cd5da6af --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go @@ -0,0 +1,74 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + initCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "apiserver_init_events_total", + Help: "Counter of init events processed in watchcache broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + watchCacheCapacityIncreaseTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "watch_cache_capacity_increase_total", + Help: "Total number of watch cache capacity increase events broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + watchCacheCapacityDecreaseTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "watch_cache_capacity_decrease_total", + Help: "Total number of watch cache capacity decrease events broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) +) + +func init() { + legacyregistry.MustRegister(initCounter) + legacyregistry.MustRegister(watchCacheCapacityIncreaseTotal) + legacyregistry.MustRegister(watchCacheCapacityDecreaseTotal) +} + +// recordsWatchCacheCapacityChange record watchCache capacity resize(increase or decrease) operations. +func recordsWatchCacheCapacityChange(objType string, old, new int) { + if old < new { + watchCacheCapacityIncreaseTotal.WithLabelValues(objType).Inc() + return + } + watchCacheCapacityDecreaseTotal.WithLabelValues(objType).Inc() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go new file mode 100644 index 0000000000..2eb0fed32d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go @@ -0,0 +1,100 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "sync" + "time" +) + +const ( + refreshPerSecond = 50 * time.Millisecond + maxBudget = 100 * time.Millisecond +) + +// timeBudget implements a budget of time that you can use and is +// periodically being refreshed. The pattern to use it is: +// budget := newTimeBudget(...) +// ... +// timeout := budget.takeAvailable() +// // Now you can spend at most timeout on doing stuff +// ... +// // If you didn't use all timeout, return what you didn't use +// budget.returnUnused() +// +// NOTE: It's not recommended to be used concurrently from multiple threads - +// if first user takes the whole timeout, the second one will get 0 timeout +// even though the first one may return something later. +type timeBudget interface { + takeAvailable() time.Duration + returnUnused(unused time.Duration) +} + +type timeBudgetImpl struct { + sync.Mutex + budget time.Duration + + refresh time.Duration + maxBudget time.Duration +} + +func newTimeBudget(stopCh <-chan struct{}) timeBudget { + result := &timeBudgetImpl{ + budget: time.Duration(0), + refresh: refreshPerSecond, + maxBudget: maxBudget, + } + go result.periodicallyRefresh(stopCh) + return result +} + +func (t *timeBudgetImpl) periodicallyRefresh(stopCh <-chan struct{}) { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + t.Lock() + if t.budget = t.budget + t.refresh; t.budget > t.maxBudget { + t.budget = t.maxBudget + } + t.Unlock() + case <-stopCh: + return + } + } +} + +func (t *timeBudgetImpl) takeAvailable() time.Duration { + t.Lock() + defer t.Unlock() + result := t.budget + t.budget = time.Duration(0) + return result +} + +func (t *timeBudgetImpl) returnUnused(unused time.Duration) { + t.Lock() + defer t.Unlock() + if unused < 0 { + // We used more than allowed. + return + } + if t.budget = t.budget + unused; t.budget > t.maxBudget { + t.budget = t.maxBudget + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go new file mode 100644 index 0000000000..7943a93dca --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go @@ -0,0 +1,60 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cacher + +import ( + "strings" +) + +// hasPathPrefix returns true if the string matches pathPrefix exactly, or if is prefixed with pathPrefix at a path segment boundary +func hasPathPrefix(s, pathPrefix string) bool { + // Short circuit if s doesn't contain the prefix at all + if !strings.HasPrefix(s, pathPrefix) { + return false + } + + pathPrefixLength := len(pathPrefix) + + if len(s) == pathPrefixLength { + // Exact match + return true + } + if strings.HasSuffix(pathPrefix, "/") { + // pathPrefix already ensured a path segment boundary + return true + } + if s[pathPrefixLength:pathPrefixLength+1] == "/" { + // The next character in s is a path segment boundary + // Check this instead of normalizing pathPrefix to avoid allocating on every call + return true + } + return false +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go new file mode 100644 index 0000000000..dafcb39963 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -0,0 +1,631 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "fmt" + "reflect" + "sort" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +const ( + // blockTimeout determines how long we're willing to block the request + // to wait for a given resource version to be propagated to cache, + // before terminating request and returning Timeout error with retry + // after suggestion. + blockTimeout = 3 * time.Second + + // resourceVersionTooHighRetrySeconds is the seconds before a operation should be retried by the client + // after receiving a 'too high resource version' error. + resourceVersionTooHighRetrySeconds = 1 + + // eventFreshDuration is time duration of events we want to keep. + // We set it to `defaultBookmarkFrequency` plus epsilon to maximize + // chances that last bookmark was sent within kept history, at the + // same time, minimizing the needed memory usage. + eventFreshDuration = 75 * time.Second + + // defaultLowerBoundCapacity is a default value for event cache capacity's lower bound. + // TODO: Figure out, to what value we can decreased it. + defaultLowerBoundCapacity = 100 + + // defaultUpperBoundCapacity should be able to keep eventFreshDuration of history. + defaultUpperBoundCapacity = 100 * 1024 +) + +// watchCacheEvent is a single "watch event" that is send to users of +// watchCache. 
Additionally to a typical "watch.Event" it contains +// the previous value of the object to enable proper filtering in the +// upper layers. +type watchCacheEvent struct { + Type watch.EventType + Object runtime.Object + ObjLabels labels.Set + ObjFields fields.Set + PrevObject runtime.Object + PrevObjLabels labels.Set + PrevObjFields fields.Set + Key string + ResourceVersion uint64 + RecordTime time.Time +} + +// Computing a key of an object is generally non-trivial (it performs +// e.g. validation underneath). Similarly computing object fields and +// labels. To avoid computing them multiple times (to serve the event +// in different List/Watch requests), in the underlying store we are +// keeping structs (key, object, labels, fields). +type storeElement struct { + Key string + Object runtime.Object + Labels labels.Set + Fields fields.Set +} + +func storeElementKey(obj interface{}) (string, error) { + elem, ok := obj.(*storeElement) + if !ok { + return "", fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Key, nil +} + +func storeElementObject(obj interface{}) (runtime.Object, error) { + elem, ok := obj.(*storeElement) + if !ok { + return nil, fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Object, nil +} + +func storeElementIndexFunc(objIndexFunc cache.IndexFunc) cache.IndexFunc { + return func(obj interface{}) (strings []string, e error) { + seo, err := storeElementObject(obj) + if err != nil { + return nil, err + } + return objIndexFunc(seo) + } +} + +func storeElementIndexers(indexers *cache.Indexers) cache.Indexers { + if indexers == nil { + return cache.Indexers{} + } + ret := cache.Indexers{} + for indexName, indexFunc := range *indexers { + ret[indexName] = storeElementIndexFunc(indexFunc) + } + return ret +} + +// watchCache implements a Store interface. +// However, it depends on the elements implementing runtime.Object interface. +// +// watchCache is a "sliding window" (with a limited capacity) of objects +// observed from a watch. +type watchCache struct { + sync.RWMutex + + // Condition on which lists are waiting for the fresh enough + // resource version. + cond *sync.Cond + + // Maximum size of history window. + capacity int + + // upper bound of capacity since event cache has a dynamic size. + upperBoundCapacity int + + // lower bound of capacity since event cache has a dynamic size. + lowerBoundCapacity int + + // keyFunc is used to get a key in the underlying storage for a given object. + keyFunc func(runtime.Object) (string, error) + + // getAttrsFunc is used to get labels and fields of an object. + getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error) + + // cache is used a cyclic buffer - its first element (with the smallest + // resourceVersion) is defined by startIndex, its last element is defined + // by endIndex (if cache is full it will be startIndex + capacity). + // Both startIndex and endIndex can be greater than buffer capacity - + // you should always apply modulo capacity to get an index in cache array. + cache []*watchCacheEvent + startIndex int + endIndex int + + // store will effectively support LIST operation from the "end of cache + // history" i.e. from the moment just after the newest cached watched event. + // It is necessary to effectively allow clients to start watching at now. + // NOTE: We assume that is thread-safe. + store cache.Indexer + + // ResourceVersion up to which the watchCache is propagated. + resourceVersion uint64 + + // ResourceVersion of the last list result (populated via Replace() method). 
+ listResourceVersion uint64 + + // This handler is run at the end of every successful Replace() method. + onReplace func() + + // This handler is run at the end of every Add/Update/Delete method + // and additionally gets the previous value of the object. + eventHandler func(*watchCacheEvent) + + // for testing timeouts. + clock clock.Clock + + // An underlying storage.Versioner. + versioner storage.Versioner + + // cacher's objectType. + objectType reflect.Type +} + +func newWatchCache( + keyFunc func(runtime.Object) (string, error), + eventHandler func(*watchCacheEvent), + getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error), + versioner storage.Versioner, + indexers *cache.Indexers, + clock clock.Clock, + objectType reflect.Type) *watchCache { + wc := &watchCache{ + capacity: defaultLowerBoundCapacity, + keyFunc: keyFunc, + getAttrsFunc: getAttrsFunc, + cache: make([]*watchCacheEvent, defaultLowerBoundCapacity), + lowerBoundCapacity: defaultLowerBoundCapacity, + upperBoundCapacity: defaultUpperBoundCapacity, + startIndex: 0, + endIndex: 0, + store: cache.NewIndexer(storeElementKey, storeElementIndexers(indexers)), + resourceVersion: 0, + listResourceVersion: 0, + eventHandler: eventHandler, + clock: clock, + versioner: versioner, + objectType: objectType, + } + wc.cond = sync.NewCond(wc.RLocker()) + return wc +} + +// Add takes runtime.Object as an argument. +func (w *watchCache) Add(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Added, Object: object} + + f := func(elem *storeElement) error { return w.store.Add(elem) } + return w.processEvent(event, resourceVersion, f) +} + +// Update takes runtime.Object as an argument. +func (w *watchCache) Update(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Modified, Object: object} + + f := func(elem *storeElement) error { return w.store.Update(elem) } + return w.processEvent(event, resourceVersion, f) +} + +// Delete takes runtime.Object as an argument. +func (w *watchCache) Delete(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Deleted, Object: object} + + f := func(elem *storeElement) error { return w.store.Delete(elem) } + return w.processEvent(event, resourceVersion, f) +} + +func (w *watchCache) objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + resourceVersion, err := w.versioner.ObjectResourceVersion(object) + if err != nil { + return nil, 0, err + } + return object, resourceVersion, nil +} + +// processEvent is safe as long as there is at most one call to it in flight +// at any point in time. 
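// A minimal, self-contained sketch (not part of the vendored file) of the
// cyclic-buffer bookkeeping the watchCache's cache/startIndex/endIndex fields
// above describe (and updateCache/isCacheFullLocked below implement): both
// indexes only grow, indexing applies modulo capacity, and when the buffer is
// full the oldest element is dropped by advancing startIndex. The ring type
// is an illustrative stand-in.
package main

import "fmt"

type ring struct {
	buf        []int
	start, end int // monotonically increasing; apply % len(buf) to index buf
}

func newRing(capacity int) *ring { return &ring{buf: make([]int, capacity)} }

func (r *ring) full() bool { return r.end == r.start+len(r.buf) }

func (r *ring) add(v int) {
	if r.full() {
		r.start++ // drop the oldest element
	}
	r.buf[r.end%len(r.buf)] = v
	r.end++
}

func (r *ring) snapshot() []int {
	out := make([]int, 0, r.end-r.start)
	for i := r.start; i < r.end; i++ {
		out = append(out, r.buf[i%len(r.buf)])
	}
	return out
}

func main() {
	r := newRing(3)
	for v := 1; v <= 5; v++ {
		r.add(v)
	}
	fmt.Println(r.snapshot()) // [3 4 5]: 1 and 2 were evicted
}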
+func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(*storeElement) error) error { + key, err := w.keyFunc(event.Object) + if err != nil { + return fmt.Errorf("couldn't compute key: %v", err) + } + elem := &storeElement{Key: key, Object: event.Object} + elem.Labels, elem.Fields, err = w.getAttrsFunc(event.Object) + if err != nil { + return err + } + + wcEvent := &watchCacheEvent{ + Type: event.Type, + Object: elem.Object, + ObjLabels: elem.Labels, + ObjFields: elem.Fields, + Key: key, + ResourceVersion: resourceVersion, + RecordTime: w.clock.Now(), + } + + if err := func() error { + // TODO: We should consider moving this lock below after the watchCacheEvent + // is created. In such situation, the only problematic scenario is Replace( + // happening after getting object from store and before acquiring a lock. + // Maybe introduce another lock for this purpose. + w.Lock() + defer w.Unlock() + + previous, exists, err := w.store.Get(elem) + if err != nil { + return err + } + if exists { + previousElem := previous.(*storeElement) + wcEvent.PrevObject = previousElem.Object + wcEvent.PrevObjLabels = previousElem.Labels + wcEvent.PrevObjFields = previousElem.Fields + } + + w.updateCache(wcEvent) + w.resourceVersion = resourceVersion + defer w.cond.Broadcast() + + return updateFunc(elem) + }(); err != nil { + return err + } + + // Avoid calling event handler under lock. + // This is safe as long as there is at most one call to Add/Update/Delete and + // UpdateResourceVersion in flight at any point in time, which is true now, + // because reflector calls them synchronously from its main thread. + if w.eventHandler != nil { + w.eventHandler(wcEvent) + } + return nil +} + +// Assumes that lock is already held for write. +func (w *watchCache) updateCache(event *watchCacheEvent) { + w.resizeCacheLocked(event.RecordTime) + if w.isCacheFullLocked() { + // Cache is full - remove the oldest element. + w.startIndex++ + } + w.cache[w.endIndex%w.capacity] = event + w.endIndex++ +} + +// resizeCacheLocked resizes the cache if necessary: +// - increases capacity by 2x if cache is full and all cached events occurred within last eventFreshDuration. +// - decreases capacity by 2x when recent quarter of events occurred outside of eventFreshDuration(protect watchCache from flapping). +func (w *watchCache) resizeCacheLocked(eventTime time.Time) { + if w.isCacheFullLocked() && eventTime.Sub(w.cache[w.startIndex%w.capacity].RecordTime) < eventFreshDuration { + capacity := min(w.capacity*2, w.upperBoundCapacity) + if capacity > w.capacity { + w.doCacheResizeLocked(capacity) + } + return + } + if w.isCacheFullLocked() && eventTime.Sub(w.cache[(w.endIndex-w.capacity/4)%w.capacity].RecordTime) > eventFreshDuration { + capacity := max(w.capacity/2, w.lowerBoundCapacity) + if capacity < w.capacity { + w.doCacheResizeLocked(capacity) + } + return + } +} + +// isCacheFullLocked used to judge whether watchCacheEvent is full. +// Assumes that lock is already held for write. +func (w *watchCache) isCacheFullLocked() bool { + return w.endIndex == w.startIndex+w.capacity +} + +// doCacheResizeLocked resize watchCache's event array with different capacity. +// Assumes that lock is already held for write. +func (w *watchCache) doCacheResizeLocked(capacity int) { + newCache := make([]*watchCacheEvent, capacity) + if capacity < w.capacity { + // adjust startIndex if cache capacity shrink. 
+ w.startIndex = w.endIndex - capacity + } + for i := w.startIndex; i < w.endIndex; i++ { + newCache[i%capacity] = w.cache[i%w.capacity] + } + w.cache = newCache + recordsWatchCacheCapacityChange(w.objectType.String(), w.capacity, capacity) + w.capacity = capacity +} + +func (w *watchCache) UpdateResourceVersion(resourceVersion string) { + rv, err := w.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + klog.Errorf("Couldn't parse resourceVersion: %v", err) + return + } + + func() { + w.Lock() + defer w.Unlock() + w.resourceVersion = rv + }() + + // Avoid calling event handler under lock. + // This is safe as long as there is at most one call to Add/Update/Delete and + // UpdateResourceVersion in flight at any point in time, which is true now, + // because reflector calls them synchronously from its main thread. + if w.eventHandler != nil { + wcEvent := &watchCacheEvent{ + Type: watch.Bookmark, + ResourceVersion: rv, + } + w.eventHandler(wcEvent) + } +} + +// List returns list of pointers to objects. +func (w *watchCache) List() []interface{} { + return w.store.List() +} + +// waitUntilFreshAndBlock waits until cache is at least as fresh as given . +// NOTE: This function acquired lock and doesn't release it. +// You HAVE TO explicitly call w.RUnlock() after this function. +func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utiltrace.Trace) error { + startTime := w.clock.Now() + go func() { + // Wake us up when the time limit has expired. The docs + // promise that time.After (well, NewTimer, which it calls) + // will wait *at least* the duration given. Since this go + // routine starts sometime after we record the start time, and + // it will wake up the loop below sometime after the broadcast, + // we don't need to worry about waking it up before the time + // has expired accidentally. + <-w.clock.After(blockTimeout) + w.cond.Broadcast() + }() + + w.RLock() + if trace != nil { + trace.Step("watchCache locked acquired") + } + for w.resourceVersion < resourceVersion { + if w.clock.Since(startTime) >= blockTimeout { + // Request that the client retry after 'resourceVersionTooHighRetrySeconds' seconds. + return storage.NewTooLargeResourceVersionError(resourceVersion, w.resourceVersion, resourceVersionTooHighRetrySeconds) + } + w.cond.Wait() + } + if trace != nil { + trace.Step("watchCache fresh enough") + } + return nil +} + +// WaitUntilFreshAndList returns list of pointers to objects. +func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues []storage.MatchValue, trace *utiltrace.Trace) ([]interface{}, uint64, error) { + err := w.waitUntilFreshAndBlock(resourceVersion, trace) + defer w.RUnlock() + if err != nil { + return nil, 0, err + } + + // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only + // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we + // want - they will be filtered out later. The fact that we return less things is only further performance improvement. + // TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible. + for _, matchValue := range matchValues { + if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil { + return result, w.resourceVersion, nil + } + } + return w.store.List(), w.resourceVersion, nil +} + +// WaitUntilFreshAndGet returns a pointers to object. 
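// A minimal, self-contained sketch (not part of the vendored file) of the
// pattern waitUntilFreshAndBlock above uses to bound a sync.Cond wait: a
// helper goroutine broadcasts once the deadline passes, and the waiter
// re-checks both the condition and the elapsed time on every wakeup.
// Names and durations here are illustrative.
package main

import (
	"fmt"
	"sync"
	"time"
)

type versioned struct {
	mu      sync.RWMutex
	cond    *sync.Cond
	current uint64
}

func newVersioned() *versioned {
	v := &versioned{}
	v.cond = sync.NewCond(v.mu.RLocker())
	return v
}

func (v *versioned) set(rv uint64) {
	v.mu.Lock()
	v.current = rv
	v.mu.Unlock()
	v.cond.Broadcast()
}

// waitAtLeast blocks until current >= want or until timeout elapses.
func (v *versioned) waitAtLeast(want uint64, timeout time.Duration) error {
	start := time.Now()
	go func() {
		// Wake the waiter once the deadline has passed so it can observe
		// the timeout; a spurious wakeup before then is harmless.
		time.Sleep(timeout)
		v.cond.Broadcast()
	}()

	v.mu.RLock()
	defer v.mu.RUnlock()
	for v.current < want {
		if time.Since(start) >= timeout {
			return fmt.Errorf("timed out waiting for version %d (current %d)", want, v.current)
		}
		v.cond.Wait()
	}
	return nil
}

func main() {
	v := newVersioned()
	go func() { time.Sleep(50 * time.Millisecond); v.set(7) }()
	fmt.Println(v.waitAtLeast(7, time.Second))                // <nil>
	fmt.Println(v.waitAtLeast(9, 100*time.Millisecond))       // timed out ...
}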
+func (w *watchCache) WaitUntilFreshAndGet(resourceVersion uint64, key string, trace *utiltrace.Trace) (interface{}, bool, uint64, error) { + err := w.waitUntilFreshAndBlock(resourceVersion, trace) + defer w.RUnlock() + if err != nil { + return nil, false, 0, err + } + value, exists, err := w.store.GetByKey(key) + return value, exists, w.resourceVersion, err +} + +func (w *watchCache) ListKeys() []string { + return w.store.ListKeys() +} + +// Get takes runtime.Object as a parameter. However, it returns +// pointer to . +func (w *watchCache) Get(obj interface{}) (interface{}, bool, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, false, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + key, err := w.keyFunc(object) + if err != nil { + return nil, false, fmt.Errorf("couldn't compute key: %v", err) + } + + return w.store.Get(&storeElement{Key: key, Object: object}) +} + +// GetByKey returns pointer to . +func (w *watchCache) GetByKey(key string) (interface{}, bool, error) { + return w.store.GetByKey(key) +} + +// Replace takes slice of runtime.Object as a parameter. +func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { + version, err := w.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + toReplace := make([]interface{}, 0, len(objs)) + for _, obj := range objs { + object, ok := obj.(runtime.Object) + if !ok { + return fmt.Errorf("didn't get runtime.Object for replace: %#v", obj) + } + key, err := w.keyFunc(object) + if err != nil { + return fmt.Errorf("couldn't compute key: %v", err) + } + objLabels, objFields, err := w.getAttrsFunc(object) + if err != nil { + return err + } + toReplace = append(toReplace, &storeElement{ + Key: key, + Object: object, + Labels: objLabels, + Fields: objFields, + }) + } + + w.Lock() + defer w.Unlock() + + w.startIndex = 0 + w.endIndex = 0 + if err := w.store.Replace(toReplace, resourceVersion); err != nil { + return err + } + w.listResourceVersion = version + w.resourceVersion = version + if w.onReplace != nil { + w.onReplace() + } + w.cond.Broadcast() + klog.V(3).Infof("Replace watchCache (rev: %v) ", resourceVersion) + return nil +} + +func (w *watchCache) SetOnReplace(onReplace func()) { + w.Lock() + defer w.Unlock() + w.onReplace = onReplace +} + +func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*watchCacheEvent, error) { + size := w.endIndex - w.startIndex + var oldest uint64 + switch { + case w.listResourceVersion > 0 && w.startIndex == 0: + // If no event was removed from the buffer since last relist, the oldest watch + // event we can deliver is one greater than the resource version of the list. + oldest = w.listResourceVersion + 1 + case size > 0: + // If the previous condition is not satisfied: either some event was already + // removed from the buffer or we've never completed a list (the latter can + // only happen in unit tests that populate the buffer without performing + // list/replace operations), the oldest watch event we can deliver is the first + // one in the buffer. + oldest = w.cache[w.startIndex%w.capacity].ResourceVersion + default: + return nil, fmt.Errorf("watch cache isn't correctly initialized") + } + + if resourceVersion == 0 { + // resourceVersion = 0 means that we don't require any specific starting point + // and we would like to start watching from ~now. 
+ // However, to keep backward compatibility, we additionally need to return the + // current state and only then start watching from that point. + // + // TODO: In v2 api, we should stop returning the current state - #13969. + allItems := w.store.List() + result := make([]*watchCacheEvent, len(allItems)) + for i, item := range allItems { + elem, ok := item.(*storeElement) + if !ok { + return nil, fmt.Errorf("not a storeElement: %v", elem) + } + objLabels, objFields, err := w.getAttrsFunc(elem.Object) + if err != nil { + return nil, err + } + result[i] = &watchCacheEvent{ + Type: watch.Added, + Object: elem.Object, + ObjLabels: objLabels, + ObjFields: objFields, + Key: elem.Key, + ResourceVersion: w.resourceVersion, + } + } + return result, nil + } + if resourceVersion < oldest-1 { + return nil, errors.NewResourceExpired(fmt.Sprintf("too old resource version: %d (%d)", resourceVersion, oldest-1)) + } + + // Binary search the smallest index at which resourceVersion is greater than the given one. + f := func(i int) bool { + return w.cache[(w.startIndex+i)%w.capacity].ResourceVersion > resourceVersion + } + first := sort.Search(size, f) + result := make([]*watchCacheEvent, size-first) + for i := 0; i < size-first; i++ { + result[i] = w.cache[(w.startIndex+first+i)%w.capacity] + } + return result, nil +} + +func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]*watchCacheEvent, error) { + w.RLock() + defer w.RUnlock() + return w.GetAllEventsSinceThreadUnsafe(resourceVersion) +} + +func (w *watchCache) Resync() error { + // Nothing to do + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/doc.go b/vendor/k8s.io/apiserver/pkg/storage/doc.go new file mode 100644 index 0000000000..fbdd944687 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Interfaces for database-related operations. +package storage // import "k8s.io/apiserver/pkg/storage" diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors.go b/vendor/k8s.io/apiserver/pkg/storage/errors.go new file mode 100644 index 0000000000..9c72d59fbf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors.go @@ -0,0 +1,195 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +const ( + ErrCodeKeyNotFound int = iota + 1 + ErrCodeKeyExists + ErrCodeResourceVersionConflicts + ErrCodeInvalidObj + ErrCodeUnreachable +) + +var errCodeToMessage = map[int]string{ + ErrCodeKeyNotFound: "key not found", + ErrCodeKeyExists: "key exists", + ErrCodeResourceVersionConflicts: "resource version conflicts", + ErrCodeInvalidObj: "invalid object", + ErrCodeUnreachable: "server unreachable", +} + +func NewKeyNotFoundError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeKeyNotFound, + Key: key, + ResourceVersion: rv, + } +} + +func NewKeyExistsError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeKeyExists, + Key: key, + ResourceVersion: rv, + } +} + +func NewResourceVersionConflictsError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeResourceVersionConflicts, + Key: key, + ResourceVersion: rv, + } +} + +func NewUnreachableError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeUnreachable, + Key: key, + ResourceVersion: rv, + } +} + +func NewInvalidObjError(key, msg string) *StorageError { + return &StorageError{ + Code: ErrCodeInvalidObj, + Key: key, + AdditionalErrorMsg: msg, + } +} + +type StorageError struct { + Code int + Key string + ResourceVersion int64 + AdditionalErrorMsg string +} + +func (e *StorageError) Error() string { + return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %s", + errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.AdditionalErrorMsg) +} + +// IsNotFound returns true if and only if err is "key" not found error. +func IsNotFound(err error) bool { + return isErrCode(err, ErrCodeKeyNotFound) +} + +// IsNodeExist returns true if and only if err is an node already exist error. +func IsNodeExist(err error) bool { + return isErrCode(err, ErrCodeKeyExists) +} + +// IsUnreachable returns true if and only if err indicates the server could not be reached. +func IsUnreachable(err error) bool { + return isErrCode(err, ErrCodeUnreachable) +} + +// IsConflict returns true if and only if err is a write conflict. +func IsConflict(err error) bool { + return isErrCode(err, ErrCodeResourceVersionConflicts) +} + +// IsInvalidObj returns true if and only if err is invalid error +func IsInvalidObj(err error) bool { + return isErrCode(err, ErrCodeInvalidObj) +} + +func isErrCode(err error, code int) bool { + if err == nil { + return false + } + if e, ok := err.(*StorageError); ok { + return e.Code == code + } + return false +} + +// InvalidError is generated when an error caused by invalid API object occurs +// in the storage package. +type InvalidError struct { + Errs field.ErrorList +} + +func (e InvalidError) Error() string { + return e.Errs.ToAggregate().Error() +} + +// IsInvalidError returns true if and only if err is an InvalidError. +func IsInvalidError(err error) bool { + _, ok := err.(InvalidError) + return ok +} + +func NewInvalidError(errors field.ErrorList) InvalidError { + return InvalidError{errors} +} + +// InternalError is generated when an error occurs in the storage package, i.e., +// not from the underlying storage backend (e.g., etcd). 
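// A minimal usage sketch (not part of the vendored file) showing how a caller
// can classify errors from this package with the Is* helpers defined above.
// The lookup function and the key are illustrative assumptions.
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage"
)

// lookup stands in for a storage read that may fail with a typed StorageError.
func lookup(key string) error {
	return storage.NewKeyNotFoundError(key, 42)
}

func main() {
	err := lookup("/registry/pods/default/nginx")
	switch {
	case storage.IsNotFound(err):
		fmt.Println("treat as 404:", err)
	case storage.IsConflict(err):
		fmt.Println("treat as 409:", err)
	default:
		fmt.Println("unexpected error:", err)
	}
}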
+type InternalError struct { + Reason string +} + +func (e InternalError) Error() string { + return e.Reason +} + +// IsInternalError returns true if and only if err is an InternalError. +func IsInternalError(err error) bool { + _, ok := err.(InternalError) + return ok +} + +func NewInternalError(reason string) InternalError { + return InternalError{reason} +} + +func NewInternalErrorf(format string, a ...interface{}) InternalError { + return InternalError{fmt.Sprintf(format, a...)} +} + +var tooLargeResourceVersionCauseMsg = "Too large resource version" + +// NewTooLargeResourceVersionError returns a timeout error with the given retrySeconds for a request for +// a minimum resource version that is larger than the largest currently available resource version for a requested resource. +func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error { + err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) + err.ErrStatus.Details.Causes = []metav1.StatusCause{ + { + Type: metav1.CauseTypeResourceVersionTooLarge, + Message: tooLargeResourceVersionCauseMsg, + }, + } + return err +} + +// IsTooLargeResourceVersion returns true if the error is a TooLargeResourceVersion error. +func IsTooLargeResourceVersion(err error) bool { + if !errors.IsTimeout(err) { + return false + } + return errors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go new file mode 100644 index 0000000000..e251b61680 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package storage provides conversion of storage errors to API errors. +package storage // import "k8s.io/apiserver/pkg/storage/errors" diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go new file mode 100644 index 0000000000..fd3b35ed06 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go @@ -0,0 +1,116 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/storage" +) + +// InterpretListError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretListError(err error, qualifiedResource schema.GroupResource) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, "") + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "list", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretGetError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretGetError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "get", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretCreateError converts a generic error on a create +// operation into the appropriate API error. +func InterpretCreateError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNodeExist(err): + return errors.NewAlreadyExists(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "create", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretUpdateError converts a generic error on an update +// operation into the appropriate API error. +func InterpretUpdateError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsConflict(err), storage.IsNodeExist(err), storage.IsInvalidObj(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "update", 2) // TODO: make configurable or handled at a higher level + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretDeleteError converts a generic error on a delete +// operation into the appropriate API error. +func InterpretDeleteError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "delete", 2) // TODO: make configurable or handled at a higher level + case storage.IsConflict(err), storage.IsNodeExist(err), storage.IsInvalidObj(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretWatchError converts a generic error on a watch +// operation into the appropriate API error. 
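The Interpret* functions above translate low-level storage errors into the API status errors clients expect (404, 409, and so on). A small sketch of that mapping, assuming the usual import aliases since both packages are named `storage`:

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storage"
	storageerrors "k8s.io/apiserver/pkg/storage/errors"
)

func main() {
	gr := schema.GroupResource{Group: "", Resource: "pods"}

	// A "key not found" from the storage layer becomes a NotFound API status.
	notFound := storage.NewKeyNotFoundError("/registry/pods/default/foo", 0)
	fmt.Println(apierrors.IsNotFound(storageerrors.InterpretGetError(notFound, gr, "foo"))) // true

	// A resource version conflict on update becomes a Conflict API status.
	conflict := storage.NewResourceVersionConflictsError("/registry/pods/default/foo", 7)
	fmt.Println(apierrors.IsConflict(storageerrors.InterpretUpdateError(conflict, gr, "foo"))) // true
}
```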
+func InterpretWatchError(err error, resource schema.GroupResource, name string) error { + switch { + case storage.IsInvalidError(err): + invalidError, _ := err.(storage.InvalidError) + return errors.NewInvalid(schema.GroupKind{Group: resource.Group, Kind: resource.Resource}, name, invalidError.Errs) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS new file mode 100644 index 0000000000..84666835da --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- wojtek-t +- timothysc +- madhusudancs +- hongchaodeng diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go new file mode 100644 index 0000000000..c42fc6e08e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/storage" +) + +// APIObjectVersioner implements versioning and extracting etcd node information +// for objects that have an embedded ObjectMeta or ListMeta field. +type APIObjectVersioner struct{} + +// UpdateObject implements Versioner +func (a APIObjectVersioner) UpdateObject(obj runtime.Object, resourceVersion uint64) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + versionString := "" + if resourceVersion != 0 { + versionString = strconv.FormatUint(resourceVersion, 10) + } + accessor.SetResourceVersion(versionString) + return nil +} + +// UpdateList implements Versioner +func (a APIObjectVersioner) UpdateList(obj runtime.Object, resourceVersion uint64, nextKey string, count *int64) error { + if resourceVersion == 0 { + return fmt.Errorf("illegal resource version from storage: %d", resourceVersion) + } + listAccessor, err := meta.ListAccessor(obj) + if err != nil || listAccessor == nil { + return err + } + versionString := strconv.FormatUint(resourceVersion, 10) + listAccessor.SetResourceVersion(versionString) + listAccessor.SetContinue(nextKey) + listAccessor.SetRemainingItemCount(count) + return nil +} + +// PrepareObjectForStorage clears resource version and self link prior to writing to etcd. 
+func (a APIObjectVersioner) PrepareObjectForStorage(obj runtime.Object) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion("") + accessor.SetSelfLink("") + return nil +} + +// ObjectResourceVersion implements Versioner +func (a APIObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + version := accessor.GetResourceVersion() + if len(version) == 0 { + return 0, nil + } + return strconv.ParseUint(version, 10, 64) +} + +// ParseResourceVersion takes a resource version argument and converts it to +// the etcd version. For watch we should pass to helper.Watch(). Because resourceVersion is +// an opaque value, the default watch behavior for non-zero watch is to watch +// the next value (if you pass "1", you will see updates from "2" onwards). +func (a APIObjectVersioner) ParseResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" || resourceVersion == "0" { + return 0, nil + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + if err != nil { + return 0, storage.NewInvalidError(field.ErrorList{ + // Validation errors are supposed to return version-specific field + // paths, but this is probably close enough. + field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()), + }) + } + return version, nil +} + +// Versioner implements Versioner +var Versioner storage.Versioner = APIObjectVersioner{} + +// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings, +// but etcd resource versions are special, they're actually ints, so we can easily compare them. +func (a APIObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int { + lhsVersion, err := Versioner.ObjectResourceVersion(lhs) + if err != nil { + // coder error + panic(err) + } + rhsVersion, err := Versioner.ObjectResourceVersion(rhs) + if err != nil { + // coder error + panic(err) + } + + if lhsVersion == rhsVersion { + return 0 + } + if lhsVersion < rhsVersion { + return -1 + } + + return 1 +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go new file mode 100644 index 0000000000..1f97a5a77b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "context" + "strconv" + "sync" + "time" + + "go.etcd.io/etcd/clientv3" + "k8s.io/klog/v2" +) + +const ( + compactRevKey = "compact_rev_key" +) + +var ( + endpointsMapMu sync.Mutex + endpointsMap map[string]struct{} +) + +func init() { + endpointsMap = make(map[string]struct{}) +} + +// StartCompactor starts a compactor in the background to compact old version of keys that's not needed. +// By default, we save the most recent 10 minutes data and compact versions > 10minutes ago. 
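A short sketch of the APIObjectVersioner behaviour described above: `""` and `"0"` both parse to revision 0, any other value must be the decimal etcd revision, and UpdateObject writes the revision back into the object's metadata as a string. The Unstructured object is used only to keep the example self-contained; the apiserver normally operates on typed objects.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apiserver/pkg/storage/etcd3"
)

func main() {
	v := etcd3.APIObjectVersioner{}

	// "" and "0" both mean "no specific revision requested".
	rv, _ := v.ParseResourceVersion("0")
	fmt.Println(rv) // 0

	// Any other value must be the decimal etcd revision.
	rv, err := v.ParseResourceVersion("1234")
	fmt.Println(rv, err) // 1234 <nil>

	// Non-numeric values are rejected as an InvalidError.
	if _, err := v.ParseResourceVersion("not-a-number"); err != nil {
		fmt.Println("invalid resourceVersion:", err)
	}

	// UpdateObject writes the revision back into metadata.resourceVersion.
	obj := &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "v1", "kind": "Pod"}}
	_ = v.UpdateObject(obj, 1234)
	fmt.Println(obj.GetResourceVersion()) // "1234"
}
```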
+// It should be enough for slow watchers and to tolerate burst. +// TODO: We might keep a longer history (12h) in the future once storage API can take advantage of past version of keys. +func StartCompactor(ctx context.Context, client *clientv3.Client, compactInterval time.Duration) { + endpointsMapMu.Lock() + defer endpointsMapMu.Unlock() + + // In one process, we can have only one compactor for one cluster. + // Currently we rely on endpoints to differentiate clusters. + for _, ep := range client.Endpoints() { + if _, ok := endpointsMap[ep]; ok { + klog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints()) + return + } + } + for _, ep := range client.Endpoints() { + endpointsMap[ep] = struct{}{} + } + + if compactInterval != 0 { + go compactor(ctx, client, compactInterval) + } +} + +// compactor periodically compacts historical versions of keys in etcd. +// It will compact keys with versions older than given interval. +// In other words, after compaction, it will only contain keys set during last interval. +// Any API call for the older versions of keys will return error. +// Interval is the time interval between each compaction. The first compaction happens after "interval". +func compactor(ctx context.Context, client *clientv3.Client, interval time.Duration) { + // Technical definitions: + // We have a special key in etcd defined as *compactRevKey*. + // compactRevKey's value will be set to the string of last compacted revision. + // compactRevKey's version will be used as logical time for comparison. THe version is referred as compact time. + // Initially, because the key doesn't exist, the compact time (version) is 0. + // + // Algorithm: + // - Compare to see if (local compact_time) = (remote compact_time). + // - If yes, increment both local and remote compact_time, and do a compaction. + // - If not, set local to remote compact_time. + // + // Technical details/insights: + // + // The protocol here is lease based. If one compactor CAS successfully, the others would know it when they fail in + // CAS later and would try again in 10 minutes. If an APIServer crashed, another one would "take over" the lease. + // + // For example, in the following diagram, we have a compactor C1 doing compaction in t1, t2. Another compactor C2 + // at t1' (t1 < t1' < t2) would CAS fail, set its known oldRev to rev at t1', and try again in t2' (t2' > t2). + // If C1 crashed and wouldn't compact at t2, C2 would CAS successfully at t2'. + // + // oldRev(t2) curRev(t2) + // + + // oldRev curRev | + // + + | + // | | | + // | | t1' | t2' + // +---v-------------v----^---------v------^----> + // t0 t1 t2 + // + // We have the guarantees: + // - in normal cases, the interval is 10 minutes. + // - in failover, the interval is >10m and <20m + // + // FAQ: + // - What if time is not accurate? We don't care as long as someone did the compaction. Atomicity is ensured using + // etcd API. + // - What happened under heavy load scenarios? Initially, each apiserver will do only one compaction + // every 10 minutes. This is very unlikely affecting or affected w.r.t. server load. + + var compactTime int64 + var rev int64 + var err error + for { + select { + case <-time.After(interval): + case <-ctx.Done(): + return + } + + compactTime, rev, err = compact(ctx, client, compactTime, rev) + if err != nil { + klog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err) + continue + } + } +} + +// compact compacts etcd store and returns current rev. 
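For context, a sketch of how a caller might wire up the compactor described above; the endpoint and the 10-minute interval are illustrative, and in the apiserver this is driven by the storage backend configuration rather than hand-written code like this:

```go
package main

import (
	"context"
	"time"

	"go.etcd.io/etcd/clientv3"
	"k8s.io/apiserver/pkg/storage/etcd3"
)

func main() {
	client, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// One compactor per set of endpoints per process; later calls for the
	// same endpoints are no-ops. An interval of 0 disables compaction.
	etcd3.StartCompactor(ctx, client, 10*time.Minute)

	// ... the process keeps running; the compactor loop ticks every interval.
}
```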
+// It will return the current compact time and global revision if no error occurred. +// Note that CAS fail will not incur any error. +func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64, int64, error) { + resp, err := client.KV.Txn(ctx).If( + clientv3.Compare(clientv3.Version(compactRevKey), "=", t), + ).Then( + clientv3.OpPut(compactRevKey, strconv.FormatInt(rev, 10)), // Expect side effect: increment Version + ).Else( + clientv3.OpGet(compactRevKey), + ).Commit() + if err != nil { + return t, rev, err + } + + curRev := resp.Header.Revision + + if !resp.Succeeded { + curTime := resp.Responses[0].GetResponseRange().Kvs[0].Version + return curTime, curRev, nil + } + curTime := t + 1 + + if rev == 0 { + // We don't compact on bootstrap. + return curTime, curRev, nil + } + if _, err = client.Compact(ctx, rev); err != nil { + return curTime, curRev, err + } + klog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints()) + return curTime, curRev, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go new file mode 100644 index 0000000000..b33751480a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + + etcdrpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +func interpretWatchError(err error) error { + switch { + case err == etcdrpc.ErrCompacted: + return errors.NewResourceExpired("The resourceVersion for the provided watch is too old.") + } + return err +} + +const ( + expired string = "The resourceVersion for the provided list is too old." + continueExpired string = "The provided continue parameter is too old " + + "to display a consistent list result. You can start a new list without " + + "the continue parameter." + inconsistentContinue string = "The provided continue parameter is too old " + + "to display a consistent list result. You can start a new list without " + + "the continue parameter, or use the continue token in this response to " + + "retrieve the remainder of the results. Continuing with the provided " + + "token results in an inconsistent list - objects that were created, " + + "modified, or deleted between the time the first chunk was returned " + + "and now may show up in the list." +) + +func interpretListError(err error, paging bool, continueKey, keyPrefix string) error { + switch { + case err == etcdrpc.ErrCompacted: + if paging { + return handleCompactedErrorForPaging(continueKey, keyPrefix) + } + return errors.NewResourceExpired(expired) + } + return err +} + +func handleCompactedErrorForPaging(continueKey, keyPrefix string) error { + // continueToken.ResoureVersion=-1 means that the apiserver can + // continue the list at the latest resource version. 
We don't use rv=0 + // for this purpose to distinguish from a bad token that has empty rv. + newToken, err := encodeContinue(continueKey, keyPrefix, -1) + if err != nil { + utilruntime.HandleError(err) + return errors.NewResourceExpired(continueExpired) + } + statusError := errors.NewResourceExpired(inconsistentContinue) + statusError.ErrStatus.ListMeta.Continue = newToken + return statusError +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go new file mode 100644 index 0000000000..83e52c0646 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "fmt" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/mvcc/mvccpb" +) + +type event struct { + key string + value []byte + prevValue []byte + rev int64 + isDeleted bool + isCreated bool + isProgressNotify bool +} + +// parseKV converts a KeyValue retrieved from an initial sync() listing to a synthetic isCreated event. +func parseKV(kv *mvccpb.KeyValue) *event { + return &event{ + key: string(kv.Key), + value: kv.Value, + prevValue: nil, + rev: kv.ModRevision, + isDeleted: false, + isCreated: true, + } +} + +func parseEvent(e *clientv3.Event) (*event, error) { + if !e.IsCreate() && e.PrevKv == nil { + // If the previous value is nil, error. One example of how this is possible is if the previous value has been compacted already. + return nil, fmt.Errorf("etcd event received with PrevKv=nil (key=%q, modRevision=%d, type=%s)", string(e.Kv.Key), e.Kv.ModRevision, e.Type.String()) + + } + ret := &event{ + key: string(e.Kv.Key), + value: e.Kv.Value, + rev: e.Kv.ModRevision, + isDeleted: e.Type == clientv3.EventTypeDelete, + isCreated: e.IsCreate(), + } + if e.PrevKv != nil { + ret.prevValue = e.PrevKv.Value + } + return ret, nil +} + +func progressNotifyEvent(rev int64) *event { + return &event{ + rev: rev, + isProgressNotify: true, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go new file mode 100644 index 0000000000..ad051d2d6c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go @@ -0,0 +1,40 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "encoding/json" + "fmt" +) + +// etcdHealth encodes data returned from etcd /healthz handler. 
+type etcdHealth struct { + // Note this has to be public so the json library can modify it. + Health string `json:"health"` +} + +// EtcdHealthCheck decodes data returned from etcd /healthz handler. +func EtcdHealthCheck(data []byte) error { + obj := etcdHealth{} + if err := json.Unmarshal(data, &obj); err != nil { + return err + } + if obj.Health != "true" { + return fmt.Errorf("Unhealthy status: %s", obj.Health) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go new file mode 100644 index 0000000000..6b5a5700a9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "context" + "sync" + "time" + + "go.etcd.io/etcd/clientv3" +) + +// leaseManager is used to manage leases requested from etcd. If a new write +// needs a lease that has similar expiration time to the previous one, the old +// lease will be reused to reduce the overhead of etcd, since lease operations +// are expensive. In the implementation, we only store one previous lease, +// since all the events have the same ttl. +type leaseManager struct { + client *clientv3.Client // etcd client used to grant leases + leaseMu sync.Mutex + prevLeaseID clientv3.LeaseID + prevLeaseExpirationTime time.Time + // The period of time in seconds and percent of TTL that each lease is + // reused. The minimum of them is used to avoid unreasonably large + // numbers. We use var instead of const for testing purposes. + leaseReuseDurationSeconds int64 + leaseReuseDurationPercent float64 +} + +// newDefaultLeaseManager creates a new lease manager using default setting. +func newDefaultLeaseManager(client *clientv3.Client) *leaseManager { + return newLeaseManager(client, 60, 0.05) +} + +// newLeaseManager creates a new lease manager with the number of buffered +// leases, lease reuse duration in seconds and percentage. The percentage +// value x means x*100%. +func newLeaseManager(client *clientv3.Client, leaseReuseDurationSeconds int64, leaseReuseDurationPercent float64) *leaseManager { + return &leaseManager{ + client: client, + leaseReuseDurationSeconds: leaseReuseDurationSeconds, + leaseReuseDurationPercent: leaseReuseDurationPercent, + } +} + +// setLeaseReuseDurationSeconds is used for testing purpose. It is used to +// reduce the extra lease duration to avoid unnecessary timeout in testing. +func (l *leaseManager) setLeaseReuseDurationSeconds(duration int64) { + l.leaseMu.Lock() + defer l.leaseMu.Unlock() + l.leaseReuseDurationSeconds = duration +} + +// GetLease returns a lease based on requested ttl: if the cached previous +// lease can be reused, reuse it; otherwise request a new one from etcd. 
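EtcdHealthCheck simply decodes the JSON body returned by etcd's health endpoint and reports anything other than `"health":"true"` as an error. A tiny illustration:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage/etcd3"
)

func main() {
	// A healthy response yields a nil error.
	fmt.Println(etcd3.EtcdHealthCheck([]byte(`{"health":"true"}`))) // <nil>

	// Anything else is surfaced as an error describing the status.
	fmt.Println(etcd3.EtcdHealthCheck([]byte(`{"health":"false"}`)))
}
```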
+func (l *leaseManager) GetLease(ctx context.Context, ttl int64) (clientv3.LeaseID, error) { + now := time.Now() + l.leaseMu.Lock() + defer l.leaseMu.Unlock() + // check if previous lease can be reused + reuseDurationSeconds := l.getReuseDurationSecondsLocked(ttl) + valid := now.Add(time.Duration(ttl) * time.Second).Before(l.prevLeaseExpirationTime) + sufficient := now.Add(time.Duration(ttl+reuseDurationSeconds) * time.Second).After(l.prevLeaseExpirationTime) + if valid && sufficient { + return l.prevLeaseID, nil + } + // request a lease with a little extra ttl from etcd + ttl += reuseDurationSeconds + lcr, err := l.client.Lease.Grant(ctx, ttl) + if err != nil { + return clientv3.LeaseID(0), err + } + // cache the new lease id + l.prevLeaseID = lcr.ID + l.prevLeaseExpirationTime = now.Add(time.Duration(ttl) * time.Second) + return lcr.ID, nil +} + +// getReuseDurationSecondsLocked returns the reusable duration in seconds +// based on the configuration. Lock has to be acquired before calling this +// function. +func (l *leaseManager) getReuseDurationSecondsLocked(ttl int64) int64 { + reuseDurationSeconds := int64(l.leaseReuseDurationPercent * float64(ttl)) + if reuseDurationSeconds > l.leaseReuseDurationSeconds { + reuseDurationSeconds = l.leaseReuseDurationSeconds + } + return reuseDurationSeconds +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go new file mode 100644 index 0000000000..e8a73082c9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go @@ -0,0 +1,84 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "fmt" + + "go.etcd.io/etcd/clientv3" + "k8s.io/klog/v2" +) + +func init() { + clientv3.SetLogger(klogWrapper{}) +} + +type klogWrapper struct{} + +const klogWrapperDepth = 4 + +func (klogWrapper) Info(args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Infoln(args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Infof(format string, args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Warning(args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Warningln(args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Warningf(format string, args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Error(args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Errorln(args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Errorf(format string, args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Fatal(args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, args...) 
+} + +func (klogWrapper) Fatalln(args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Fatalf(format string, args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) V(l int) bool { + return bool(klog.V(klog.Level(l)).Enabled()) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go new file mode 100644 index 0000000000..1f001406a7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -0,0 +1,115 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + etcdRequestLatency = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "etcd_request_duration_seconds", + Help: "Etcd request latency in seconds for each operation and object type.", + // Keeping it similar to the buckets used by the apiserver_request_duration_seconds metric so that + // api latency and etcd latency can be more comparable side by side. + Buckets: []float64{.005, .01, .025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, + 0.8, 0.9, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"operation", "type"}, + ) + objectCounts = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "etcd_object_counts", + Help: "Number of stored objects at the time of last check split by kind.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) + dbTotalSize = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "etcd_db_total_size_in_bytes", + Help: "Total size of the etcd database file physically allocated in bytes.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"endpoint"}, + ) + etcdBookmarkCounts = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "etcd_bookmark_counts", + Help: "Number of etcd bookmarks (progress notify events) split by kind.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + // Register the metrics. 
+ registerMetrics.Do(func() { + legacyregistry.MustRegister(etcdRequestLatency) + legacyregistry.MustRegister(objectCounts) + legacyregistry.MustRegister(dbTotalSize) + legacyregistry.MustRegister(etcdBookmarkCounts) + }) +} + +// UpdateObjectCount sets the etcd_object_counts metric. +func UpdateObjectCount(resourcePrefix string, count int64) { + objectCounts.WithLabelValues(resourcePrefix).Set(float64(count)) +} + +// RecordEtcdRequestLatency sets the etcd_request_duration_seconds metrics. +func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) { + etcdRequestLatency.WithLabelValues(verb, resource).Observe(sinceInSeconds(startTime)) +} + +// RecordEtcdBookmark updates the etcd_bookmark_counts metric. +func RecordEtcdBookmark(resource string) { + etcdBookmarkCounts.WithLabelValues(resource).Inc() +} + +// Reset resets the etcd_request_duration_seconds metric. +func Reset() { + etcdRequestLatency.Reset() +} + +// sinceInSeconds gets the time since the specified start in seconds. +func sinceInSeconds(start time.Time) float64 { + return time.Since(start).Seconds() +} + +// UpdateEtcdDbSize sets the etcd_db_total_size_in_bytes metric. +func UpdateEtcdDbSize(ep string, size int64) { + dbTotalSize.WithLabelValues(ep).Set(float64(size)) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go new file mode 100644 index 0000000000..0cff6b3fc9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -0,0 +1,939 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "path" + "reflect" + "strings" + "time" + + "go.etcd.io/etcd/clientv3" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +// authenticatedDataString satisfies the value.Context interface. It uses the key to +// authenticate the stored data. This does not defend against reuse of previously +// encrypted values under the same key, but will prevent an attacker from using an +// encrypted value from a different key. A stronger authenticated data segment would +// include the etcd3 Version field (which is incremented on each write to a key and +// reset when the key is deleted), but an attacker with write access to etcd can +// force deletion and recreation of keys to weaken that angle. +type authenticatedDataString string + +// AuthenticatedData implements the value.Context interface. 
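The metrics helpers above are meant to be registered once and then driven from the storage code paths. A brief sketch of that flow; the verb, resource names, and sizes are illustrative placeholders:

```go
package main

import (
	"time"

	"k8s.io/apiserver/pkg/storage/etcd3/metrics"
)

func main() {
	// Register the collectors with the legacy registry once at startup.
	metrics.Register()

	// Time an etcd call and record it under the appropriate verb and type.
	start := time.Now()
	// ... perform the etcd request here ...
	metrics.RecordEtcdRequestLatency("get", "pods", start)

	// Periodic bookkeeping: stored object counts and database size per endpoint.
	metrics.UpdateObjectCount("/registry/pods", 42)
	metrics.UpdateEtcdDbSize("localhost:2379", 16<<20)
}
```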
+func (d authenticatedDataString) AuthenticatedData() []byte { + return []byte(string(d)) +} + +var _ value.Context = authenticatedDataString("") + +type store struct { + client *clientv3.Client + codec runtime.Codec + versioner storage.Versioner + transformer value.Transformer + pathPrefix string + watcher *watcher + pagingEnabled bool + leaseManager *leaseManager +} + +type objState struct { + obj runtime.Object + meta *storage.ResponseMeta + rev int64 + data []byte + stale bool +} + +// New returns an etcd3 implementation of storage.Interface. +func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool) storage.Interface { + return newStore(c, newFunc, pagingEnabled, codec, prefix, transformer) +} + +func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, codec runtime.Codec, prefix string, transformer value.Transformer) *store { + versioner := APIObjectVersioner{} + result := &store{ + client: c, + codec: codec, + versioner: versioner, + transformer: transformer, + pagingEnabled: pagingEnabled, + // for compatibility with etcd2 impl. + // no-op for default prefix of '/registry'. + // keeps compatibility with etcd2 impl for custom prefixes that don't start with '/' + pathPrefix: path.Join("/", prefix), + watcher: newWatcher(c, codec, newFunc, versioner, transformer), + leaseManager: newDefaultLeaseManager(c), + } + return result +} + +// Versioner implements storage.Interface.Versioner. +func (s *store) Versioner() storage.Versioner { + return s.versioner +} + +// Get implements storage.Interface.Get. +func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error { + key = path.Join(s.pathPrefix, key) + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime) + if err != nil { + return err + } + if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } + + if len(getResp.Kvs) == 0 { + if opts.IgnoreNotFound { + return runtime.SetZeroValue(out) + } + return storage.NewKeyNotFoundError(key, 0) + } + kv := getResp.Kvs[0] + + data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key)) + if err != nil { + return storage.NewInternalError(err.Error()) + } + + return decode(s.codec, s.versioner, data, out, kv.ModRevision) +} + +// Create implements storage.Interface.Create. 
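A sketch of constructing the etcd3 store via New and issuing a Get, using the unstructured JSON codec and the identity transformer purely to keep the example self-contained (the apiserver injects a scheme-backed codec and, where configured, an encrypting transformer; the endpoint and key are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/apiserver/pkg/storage/etcd3"
	"k8s.io/apiserver/pkg/storage/value"
)

func main() {
	client, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	newFunc := func() runtime.Object { return &unstructured.Unstructured{} }
	s := etcd3.New(client, unstructured.UnstructuredJSONScheme, newFunc, "/registry", value.IdentityTransformer, true)

	// Keys are joined under the prefix, so this reads /registry/pods/default/foo.
	// IgnoreNotFound zeroes the output object instead of returning an error.
	out := &unstructured.Unstructured{}
	err = s.Get(context.TODO(), "/pods/default/foo", storage.GetOptions{IgnoreNotFound: true}, out)
	fmt.Println(err, out.Object)
}
```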
+func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { + return errors.New("resourceVersion should not be set on objects to be created") + } + if err := s.versioner.PrepareObjectForStorage(obj); err != nil { + return fmt.Errorf("PrepareObjectForStorage failed: %v", err) + } + data, err := runtime.Encode(s.codec, obj) + if err != nil { + return err + } + key = path.Join(s.pathPrefix, key) + + opts, err := s.ttlOpts(ctx, int64(ttl)) + if err != nil { + return err + } + + newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key)) + if err != nil { + return storage.NewInternalError(err.Error()) + } + + startTime := time.Now() + txnResp, err := s.client.KV.Txn(ctx).If( + notFound(key), + ).Then( + clientv3.OpPut(key, string(newData), opts...), + ).Commit() + metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime) + if err != nil { + return err + } + if !txnResp.Succeeded { + return storage.NewKeyExistsError(key, 0) + } + + if out != nil { + putResp := txnResp.Responses[0].GetResponsePut() + return decode(s.codec, s.versioner, data, out, putResp.Header.Revision) + } + return nil +} + +// Delete implements storage.Interface.Delete. +func (s *store) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc) error { + v, err := conversion.EnforcePtr(out) + if err != nil { + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + key = path.Join(s.pathPrefix, key) + return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion) +} + +func (s *store) conditionalDelete(ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc) error { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime) + if err != nil { + return err + } + for { + origState, err := s.getState(getResp, key, v, false) + if err != nil { + return err + } + if preconditions != nil { + if err := preconditions.Check(key, origState.obj); err != nil { + return err + } + } + if err := validateDeletion(ctx, origState.obj); err != nil { + return err + } + startTime := time.Now() + txnResp, err := s.client.KV.Txn(ctx).If( + clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev), + ).Then( + clientv3.OpDelete(key), + ).Else( + clientv3.OpGet(key), + ).Commit() + metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) + if err != nil { + return err + } + if !txnResp.Succeeded { + getResp = (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) + klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) + continue + } + return decode(s.codec, s.versioner, origState.data, out, origState.rev) + } +} + +// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate. 
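Create above guards against overwriting an existing object by committing the Put inside a transaction whose condition is `notFound(key)` (presumably a ModRevision == 0 comparison, which is how etcd expresses "the key has never been written"). A standalone sketch of that create-if-absent pattern with bare clientv3, assuming a reachable etcd at localhost:2379 and an illustrative key:

```go
package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	client, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	key, val := "/registry/pods/default/foo", `{"kind":"Pod"}`

	// ModRevision == 0 means the key does not exist yet, so the Put only
	// happens on first creation; otherwise the transaction reports failure.
	resp, err := client.KV.Txn(context.TODO()).If(
		clientv3.Compare(clientv3.ModRevision(key), "=", 0),
	).Then(
		clientv3.OpPut(key, val),
	).Commit()
	if err != nil {
		panic(err)
	}
	if !resp.Succeeded {
		fmt.Println("key already exists")
		return
	}
	fmt.Println("created at revision", resp.Header.Revision)
}
```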
+func (s *store) GuaranteedUpdate( + ctx context.Context, key string, out runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error { + trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)}) + defer trace.LogIfLong(500 * time.Millisecond) + + v, err := conversion.EnforcePtr(out) + if err != nil { + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + key = path.Join(s.pathPrefix, key) + + getCurrentState := func() (*objState, error) { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime) + if err != nil { + return nil, err + } + return s.getState(getResp, key, v, ignoreNotFound) + } + + var origState *objState + var mustCheckData bool + if suggestion != nil { + origState, err = s.getStateFromObject(suggestion) + if err != nil { + return err + } + mustCheckData = true + } else { + origState, err = getCurrentState() + if err != nil { + return err + } + } + trace.Step("initial value restored") + + transformContext := authenticatedDataString(key) + for { + if err := preconditions.Check(key, origState.obj); err != nil { + // If our data is already up to date, return the error + if !mustCheckData { + return err + } + + // It's possible we were working with stale data + // Actually fetch + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + // Retry + continue + } + + ret, ttl, err := s.updateState(origState, tryUpdate) + if err != nil { + // If our data is already up to date, return the error + if !mustCheckData { + return err + } + + // It's possible we were working with stale data + // Actually fetch + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + // Retry + continue + } + + data, err := runtime.Encode(s.codec, ret) + if err != nil { + return err + } + if !origState.stale && bytes.Equal(data, origState.data) { + // if we skipped the original Get in this loop, we must refresh from + // etcd in order to be sure the data in the store is equivalent to + // our desired serialization + if mustCheckData { + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + if !bytes.Equal(data, origState.data) { + // original data changed, restart loop + continue + } + } + // recheck that the data from etcd is not stale before short-circuiting a write + if !origState.stale { + return decode(s.codec, s.versioner, origState.data, out, origState.rev) + } + } + + newData, err := s.transformer.TransformToStorage(data, transformContext) + if err != nil { + return storage.NewInternalError(err.Error()) + } + + opts, err := s.ttlOpts(ctx, int64(ttl)) + if err != nil { + return err + } + trace.Step("Transaction prepared") + + startTime := time.Now() + txnResp, err := s.client.KV.Txn(ctx).If( + clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev), + ).Then( + clientv3.OpPut(key, string(newData), opts...), + ).Else( + clientv3.OpGet(key), + ).Commit() + metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime) + if err != nil { + return err + } + trace.Step("Transaction committed") + if !txnResp.Succeeded { + getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) + klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key) + origState, err = s.getState(getResp, key, v, ignoreNotFound) + 
if err != nil { + return err + } + trace.Step("Retry value restored") + mustCheckData = false + continue + } + putResp := txnResp.Responses[0].GetResponsePut() + + return decode(s.codec, s.versioner, data, out, putResp.Header.Revision) + } +} + +// GetToList implements storage.Interface.GetToList. +func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := listOpts.ResourceVersion + match := listOpts.ResourceVersionMatch + pred := listOpts.Predicate + trace := utiltrace.New("GetToList etcd3", + utiltrace.Field{"key", key}, + utiltrace.Field{"resourceVersion", resourceVersion}, + utiltrace.Field{"resourceVersionMatch", match}, + utiltrace.Field{"limit", pred.Limit}, + utiltrace.Field{"continue", pred.Continue}) + defer trace.LogIfLong(500 * time.Millisecond) + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + v, err := conversion.EnforcePtr(listPtr) + if err != nil || v.Kind() != reflect.Slice { + return fmt.Errorf("need ptr to slice: %v", err) + } + + newItemFunc := getNewItemFunc(listObj, v) + + key = path.Join(s.pathPrefix, key) + startTime := time.Now() + var opts []clientv3.OpOption + if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact { + rv, err := s.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + opts = append(opts, clientv3.WithRev(int64(rv))) + } + + getResp, err := s.client.KV.Get(ctx, key, opts...) + metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime) + if err != nil { + return err + } + if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } + + if len(getResp.Kvs) > 0 { + data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key)) + if err != nil { + return storage.NewInternalError(err.Error()) + } + if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil { + return err + } + } + // update version with cluster level revision + return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil) +} + +func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object { + // For unstructured lists with a target group/version, preserve the group/version in the instantiated list items + if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured { + if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 { + return func() runtime.Object { + return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}} + } + } + } + + // Otherwise just instantiate an empty item + elem := v.Type().Elem() + return func() runtime.Object { + return reflect.New(elem).Interface().(runtime.Object) + } +} + +func (s *store) Count(key string) (int64, error) { + key = path.Join(s.pathPrefix, key) + + // We need to make sure the key ended with "/" so that we only get children "directories". + // e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three, + // while with prefix "/a/" will return only "/a/b" which is the correct answer. 
+ if !strings.HasSuffix(key, "/") { + key += "/" + } + + startTime := time.Now() + getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly()) + metrics.RecordEtcdRequestLatency("listWithCount", key, startTime) + if err != nil { + return 0, err + } + return getResp.Count, nil +} + +// continueToken is a simple structured object for encoding the state of a continue token. +// TODO: if we change the version of the encoded from, we can't start encoding the new version +// until all other servers are upgraded (i.e. we need to support rolling schema) +// This is a public API struct and cannot change. +type continueToken struct { + APIVersion string `json:"v"` + ResourceVersion int64 `json:"rv"` + StartKey string `json:"start"` +} + +// parseFrom transforms an encoded predicate from into a versioned struct. +// TODO: return a typed error that instructs clients that they must relist +func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) { + data, err := base64.RawURLEncoding.DecodeString(continueValue) + if err != nil { + return "", 0, fmt.Errorf("continue key is not valid: %v", err) + } + var c continueToken + if err := json.Unmarshal(data, &c); err != nil { + return "", 0, fmt.Errorf("continue key is not valid: %v", err) + } + switch c.APIVersion { + case "meta.k8s.io/v1": + if c.ResourceVersion == 0 { + return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)") + } + if len(c.StartKey) == 0 { + return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)") + } + // defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot + // be at a higher level of the hierarchy, and so when we append the key prefix we will end up with + // continue start key that is fully qualified and cannot range over anything less specific than + // keyPrefix. + key := c.StartKey + if !strings.HasPrefix(key, "/") { + key = "/" + key + } + cleaned := path.Clean(key) + if cleaned != key { + return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey) + } + return keyPrefix + cleaned[1:], c.ResourceVersion, nil + default: + return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion) + } +} + +// encodeContinue returns a string representing the encoded continuation of the current query. +func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) { + nextKey := strings.TrimPrefix(key, keyPrefix) + if nextKey == key { + return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match") + } + out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey}) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(out), nil +} + +// List implements storage.Interface.List. 
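As decodeContinue/encodeContinue show, a continue token is just the RawURL base64 encoding of a small JSON struct carrying the version, the resource version, and the start key relative to the list prefix. Since those helpers are unexported, the sketch below mirrors the same encoding to show what a token looks like on the wire (the start key and revision are illustrative):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Mirrors the unexported continueToken struct used by encodeContinue.
type continueToken struct {
	APIVersion      string `json:"v"`
	ResourceVersion int64  `json:"rv"`
	StartKey        string `json:"start"`
}

func main() {
	// The start key is stored relative to the list's key prefix; List appends
	// "\x00" to resume immediately after the last returned key.
	tok := continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: 12345, StartKey: "default/foo\x00"}
	data, _ := json.Marshal(&tok)
	fmt.Println(base64.RawURLEncoding.EncodeToString(data))

	// decodeContinue reverses this, re-prefixes the start key, and rejects
	// tokens whose cleaned path would escape the key prefix.
}
```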
+func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + match := opts.ResourceVersionMatch + pred := opts.Predicate + trace := utiltrace.New("List etcd3", + utiltrace.Field{"key", key}, + utiltrace.Field{"resourceVersion", resourceVersion}, + utiltrace.Field{"resourceVersionMatch", match}, + utiltrace.Field{"limit", pred.Limit}, + utiltrace.Field{"continue", pred.Continue}) + defer trace.LogIfLong(500 * time.Millisecond) + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + v, err := conversion.EnforcePtr(listPtr) + if err != nil || v.Kind() != reflect.Slice { + return fmt.Errorf("need ptr to slice: %v", err) + } + + if s.pathPrefix != "" { + key = path.Join(s.pathPrefix, key) + } + // We need to make sure the key ended with "/" so that we only get children "directories". + // e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three, + // while with prefix "/a/" will return only "/a/b" which is the correct answer. + if !strings.HasSuffix(key, "/") { + key += "/" + } + keyPrefix := key + + // set the appropriate clientv3 options to filter the returned data set + var paging bool + options := make([]clientv3.OpOption, 0, 4) + if s.pagingEnabled && pred.Limit > 0 { + paging = true + options = append(options, clientv3.WithLimit(pred.Limit)) + } + + newItemFunc := getNewItemFunc(listObj, v) + + var fromRV *uint64 + if len(resourceVersion) > 0 { + parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + fromRV = &parsedRV + } + + var returnedRV, continueRV, withRev int64 + var continueKey string + switch { + case s.pagingEnabled && len(pred.Continue) > 0: + continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err)) + } + + if len(resourceVersion) > 0 && resourceVersion != "0" { + return apierrors.NewBadRequest("specifying resource version is not allowed when using continue") + } + + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) + key = continueKey + + // If continueRV > 0, the LIST request needs a specific resource version. + // continueRV==0 is invalid. + // If continueRV < 0, the request is for the latest resource version. + if continueRV > 0 { + withRev = continueRV + returnedRV = continueRV + } + case s.pagingEnabled && pred.Limit > 0: + if fromRV != nil { + switch match { + case metav1.ResourceVersionMatchNotOlderThan: + // The not older than constraint is checked after we get a response from etcd, + // and returnedRV is then set to the revision we get from the etcd response. + case metav1.ResourceVersionMatchExact: + returnedRV = int64(*fromRV) + withRev = returnedRV + case "": // legacy case + if *fromRV > 0 { + returnedRV = int64(*fromRV) + withRev = returnedRV + } + default: + return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) + } + } + + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) + default: + if fromRV != nil { + switch match { + case metav1.ResourceVersionMatchNotOlderThan: + // The not older than constraint is checked after we get a response from etcd, + // and returnedRV is then set to the revision we get from the etcd response. 
+ case metav1.ResourceVersionMatchExact: + returnedRV = int64(*fromRV) + withRev = returnedRV + case "": // legacy case + default: + return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) + } + } + + options = append(options, clientv3.WithPrefix()) + } + if withRev != 0 { + options = append(options, clientv3.WithRev(withRev)) + } + + // loop until we have filled the requested limit from etcd or there are no more results + var lastKey []byte + var hasMore bool + var getResp *clientv3.GetResponse + for { + startTime := time.Now() + getResp, err = s.client.KV.Get(ctx, key, options...) + metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime) + if err != nil { + return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix) + } + if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } + hasMore = getResp.More + + if len(getResp.Kvs) == 0 && getResp.More { + return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") + } + + // avoid small allocations for the result slice, since this can be called in many + // different contexts and we don't know how significantly the result will be filtered + if pred.Empty() { + growSlice(v, len(getResp.Kvs)) + } else { + growSlice(v, 2048, len(getResp.Kvs)) + } + + // take items from the response until the bucket is full, filtering as we go + for _, kv := range getResp.Kvs { + if paging && int64(v.Len()) >= pred.Limit { + hasMore = true + break + } + lastKey = kv.Key + + data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key)) + if err != nil { + return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err) + } + + if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil { + return err + } + } + + // indicate to the client which resource version was returned + if returnedRV == 0 { + returnedRV = getResp.Header.Revision + } + + // no more results remain or we didn't request paging + if !hasMore || !paging { + break + } + // we're paging but we have filled our bucket + if int64(v.Len()) >= pred.Limit { + break + } + key = string(lastKey) + "\x00" + if withRev == 0 { + withRev = returnedRV + options = append(options, clientv3.WithRev(withRev)) + } + } + + // instruct the client to begin querying from immediately after the last key we returned + // we never return a key that the client wouldn't be allowed to see + if hasMore { + // we want to start immediately after the last key + next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV) + if err != nil { + return err + } + var remainingItemCount *int64 + // getResp.Count counts in objects that do not match the pred. + // Instead of returning inaccurate count for non-empty selectors, we return nil. + // Only set remainingItemCount if the predicate is empty. + if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) { + if pred.Empty() { + c := int64(getResp.Count - pred.Limit) + remainingItemCount = &c + } + } + return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount) + } + + // no continuation + return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil) +} + +// growSlice takes a slice value and grows its capacity up +// to the maximum of the passed sizes or maxCapacity, whichever +// is smaller. Above maxCapacity decisions about allocation are left +// to the Go runtime on append. 
This allows a caller to make an +// educated guess about the potential size of the total list while +// still avoiding overly aggressive initial allocation. If sizes +// is empty maxCapacity will be used as the size to grow. +func growSlice(v reflect.Value, maxCapacity int, sizes ...int) { + cap := v.Cap() + max := cap + for _, size := range sizes { + if size > max { + max = size + } + } + if len(sizes) == 0 || max > maxCapacity { + max = maxCapacity + } + if max <= cap { + return + } + if v.Len() > 0 { + extra := reflect.MakeSlice(v.Type(), 0, max) + reflect.Copy(extra, v) + v.Set(extra) + } else { + extra := reflect.MakeSlice(v.Type(), 0, max) + v.Set(extra) + } +} + +// Watch implements storage.Interface.Watch. +func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.watch(ctx, key, opts, false) +} + +// WatchList implements storage.Interface.WatchList. +func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.watch(ctx, key, opts, true) +} + +func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) { + rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return nil, err + } + key = path.Join(s.pathPrefix, key) + return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate) +} + +func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) { + state := &objState{ + meta: &storage.ResponseMeta{}, + } + + if u, ok := v.Addr().Interface().(runtime.Unstructured); ok { + state.obj = u.NewEmptyInstance() + } else { + state.obj = reflect.New(v.Type()).Interface().(runtime.Object) + } + + if len(getResp.Kvs) == 0 { + if !ignoreNotFound { + return nil, storage.NewKeyNotFoundError(key, 0) + } + if err := runtime.SetZeroValue(state.obj); err != nil { + return nil, err + } + } else { + data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key)) + if err != nil { + return nil, storage.NewInternalError(err.Error()) + } + state.rev = getResp.Kvs[0].ModRevision + state.meta.ResourceVersion = uint64(state.rev) + state.data = data + state.stale = stale + if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil { + return nil, err + } + } + return state, nil +} + +func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) { + state := &objState{ + obj: obj, + meta: &storage.ResponseMeta{}, + } + + rv, err := s.versioner.ObjectResourceVersion(obj) + if err != nil { + return nil, fmt.Errorf("couldn't get resource version: %v", err) + } + state.rev = int64(rv) + state.meta.ResourceVersion = uint64(state.rev) + + // Compute the serialized form - for that we need to temporarily clean + // its resource version field (those are not stored in etcd). 
+ if err := s.versioner.PrepareObjectForStorage(obj); err != nil { + return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err) + } + state.data, err = runtime.Encode(s.codec, obj) + if err != nil { + return nil, err + } + if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + return state, nil +} + +func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) { + ret, ttlPtr, err := userUpdate(st.obj, *st.meta) + if err != nil { + return nil, 0, err + } + + if err := s.versioner.PrepareObjectForStorage(ret); err != nil { + return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err) + } + var ttl uint64 + if ttlPtr != nil { + ttl = *ttlPtr + } + return ret, ttl, nil +} + +// ttlOpts returns client options based on given ttl. +// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length +func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) { + if ttl == 0 { + return nil, nil + } + id, err := s.leaseManager.GetLease(ctx, ttl) + if err != nil { + return nil, err + } + return []clientv3.OpOption{clientv3.WithLease(id)}, nil +} + +// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is +// greater than the most recent actualRevision available from storage. +func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error { + if minimumResourceVersion == "" { + return nil + } + minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + // Enforce the storage.Interface guarantee that the resource version of the returned data + // "will be at least 'resourceVersion'". + if minimumRV > actualRevision { + return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0) + } + return nil +} + +// decode decodes value of bytes into object. It will also set the object resource version to rev. +// On success, objPtr would be set to the object. +func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error { + if _, err := conversion.EnforcePtr(objPtr); err != nil { + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + _, _, err := codec.Decode(value, nil, objPtr) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + return nil +} + +// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice. 
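+// Objects that do not match pred (or whose label/field extraction fails) are
+// skipped silently; a decode failure, by contrast, aborts the whole list operation.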
+func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error { + obj, _, err := codec.Decode(data, nil, newItemFunc()) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + if err := versioner.UpdateObject(obj, rev); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + if matched, err := pred.Matches(obj); err == nil && matched { + v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) + } + return nil +} + +func notFound(key string) clientv3.Cmp { + return clientv3.Compare(clientv3.ModRevision(key), "=", 0) +} + +// getTypeName returns type name of an object for reporting purposes. +func getTypeName(obj interface{}) string { + return reflect.TypeOf(obj).String() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go new file mode 100644 index 0000000000..bd87382e83 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -0,0 +1,468 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "context" + "errors" + "fmt" + "os" + "reflect" + "strconv" + "strings" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/storage/value" + + "go.etcd.io/etcd/clientv3" + "k8s.io/klog/v2" +) + +const ( + // We have set a buffer in order to reduce times of context switches. + incomingBufSize = 100 + outgoingBufSize = 100 +) + +// fatalOnDecodeError is used during testing to panic the server if watcher encounters a decoding error +var fatalOnDecodeError = false + +// errTestingDecode is the only error that testingDeferOnDecodeError catches during a panic +var errTestingDecode = errors.New("sentinel error only used during testing to indicate watch decoding error") + +// testingDeferOnDecodeError is used during testing to recover from a panic caused by errTestingDecode, all other values continue to panic +func testingDeferOnDecodeError() { + if r := recover(); r != nil && r != errTestingDecode { + panic(r) + } +} + +func init() { + // check to see if we are running in a test environment + TestOnlySetFatalOnDecodeError(true) + fatalOnDecodeError, _ = strconv.ParseBool(os.Getenv("KUBE_PANIC_WATCH_DECODE_ERROR")) +} + +// TestOnlySetFatalOnDecodeError should only be used for cases where decode errors are expected and need to be tested. e.g. conversion webhooks. +func TestOnlySetFatalOnDecodeError(b bool) { + fatalOnDecodeError = b +} + +type watcher struct { + client *clientv3.Client + codec runtime.Codec + newFunc func() runtime.Object + objectType string + versioner storage.Versioner + transformer value.Transformer +} + +// watchChan implements watch.Interface. 
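+// A typical consumer of the returned watch.Interface drains ResultChan until it is
+// closed and calls Stop once it no longer needs events; an illustrative sketch only,
+// not a prescribed pattern:
+//
+//	w, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: "0", Predicate: storage.Everything})
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Stop()
+//	for ev := range w.ResultChan() {
+//		// ev.Type is Added, Modified, Deleted, Bookmark or Error; ev.Object is the decoded object.
+//	}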
+type watchChan struct { + watcher *watcher + key string + initialRev int64 + recursive bool + progressNotify bool + internalPred storage.SelectionPredicate + ctx context.Context + cancel context.CancelFunc + incomingEventChan chan *event + resultChan chan watch.Event + errChan chan error +} + +func newWatcher(client *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, versioner storage.Versioner, transformer value.Transformer) *watcher { + res := &watcher{ + client: client, + codec: codec, + newFunc: newFunc, + versioner: versioner, + transformer: transformer, + } + if newFunc == nil { + res.objectType = "" + } else { + res.objectType = reflect.TypeOf(newFunc()).String() + } + return res +} + +// Watch watches on a key and returns a watch.Interface that transfers relevant notifications. +// If rev is zero, it will return the existing object(s) and then start watching from +// the maximum revision+1 from returned objects. +// If rev is non-zero, it will watch events happened after given revision. +// If recursive is false, it watches on given key. +// If recursive is true, it watches any children and directories under the key, excluding the root key itself. +// pred must be non-nil. Only if pred matches the change, it will be returned. +func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) (watch.Interface, error) { + if recursive && !strings.HasSuffix(key, "/") { + key += "/" + } + wc := w.createWatchChan(ctx, key, rev, recursive, progressNotify, pred) + go wc.run() + return wc, nil +} + +func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) *watchChan { + wc := &watchChan{ + watcher: w, + key: key, + initialRev: rev, + recursive: recursive, + progressNotify: progressNotify, + internalPred: pred, + incomingEventChan: make(chan *event, incomingBufSize), + resultChan: make(chan watch.Event, outgoingBufSize), + errChan: make(chan error, 1), + } + if pred.Empty() { + // The filter doesn't filter out any object. + wc.internalPred = storage.Everything + } + + // The etcd server waits until it cannot find a leader for 3 election + // timeouts to cancel existing streams. 3 is currently a hard coded + // constant. The election timeout defaults to 1000ms. If the cluster is + // healthy, when the leader is stopped, the leadership transfer should be + // smooth. (leader transfers its leadership before stopping). If leader is + // hard killed, other servers will take an election timeout to realize + // leader lost and start campaign. + wc.ctx, wc.cancel = context.WithCancel(clientv3.WithRequireLeader(ctx)) + return wc +} + +func (wc *watchChan) run() { + watchClosedCh := make(chan struct{}) + go wc.startWatching(watchClosedCh) + + var resultChanWG sync.WaitGroup + resultChanWG.Add(1) + go wc.processEvent(&resultChanWG) + + select { + case err := <-wc.errChan: + if err == context.Canceled { + break + } + errResult := transformErrorToEvent(err) + if errResult != nil { + // error result is guaranteed to be received by user before closing ResultChan. + select { + case wc.resultChan <- *errResult: + case <-wc.ctx.Done(): // user has given up all results + } + } + case <-watchClosedCh: + case <-wc.ctx.Done(): // user cancel + } + + // We use wc.ctx to reap all goroutines. Under whatever condition, we should stop them all. + // It's fine to double cancel. 
+ wc.cancel() + + // we need to wait until resultChan wouldn't be used anymore + resultChanWG.Wait() + close(wc.resultChan) +} + +func (wc *watchChan) Stop() { + wc.cancel() +} + +func (wc *watchChan) ResultChan() <-chan watch.Event { + return wc.resultChan +} + +// sync tries to retrieve existing data and send them to process. +// The revision to watch will be set to the revision in response. +// All events sent will have isCreated=true +func (wc *watchChan) sync() error { + opts := []clientv3.OpOption{} + if wc.recursive { + opts = append(opts, clientv3.WithPrefix()) + } + getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...) + if err != nil { + return err + } + wc.initialRev = getResp.Header.Revision + for _, kv := range getResp.Kvs { + wc.sendEvent(parseKV(kv)) + } + return nil +} + +// logWatchChannelErr checks whether the error is about mvcc revision compaction which is regarded as warning +func logWatchChannelErr(err error) { + if !strings.Contains(err.Error(), "mvcc: required revision has been compacted") { + klog.Errorf("watch chan error: %v", err) + } else { + klog.Warningf("watch chan error: %v", err) + } +} + +// startWatching does: +// - get current objects if initialRev=0; set initialRev to current rev +// - watch on given key and send events to process. +func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { + if wc.initialRev == 0 { + if err := wc.sync(); err != nil { + klog.Errorf("failed to sync with latest state: %v", err) + wc.sendError(err) + return + } + } + opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()} + if wc.recursive { + opts = append(opts, clientv3.WithPrefix()) + } + if wc.progressNotify { + opts = append(opts, clientv3.WithProgressNotify()) + } + wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...) + for wres := range wch { + if wres.Err() != nil { + err := wres.Err() + // If there is an error on server (e.g. compaction), the channel will return it before closed. + logWatchChannelErr(err) + wc.sendError(err) + return + } + if wres.IsProgressNotify() { + wc.sendEvent(progressNotifyEvent(wres.Header.GetRevision())) + metrics.RecordEtcdBookmark(wc.watcher.objectType) + continue + } + + for _, e := range wres.Events { + parsedEvent, err := parseEvent(e) + if err != nil { + logWatchChannelErr(err) + wc.sendError(err) + return + } + wc.sendEvent(parsedEvent) + } + } + // When we come to this point, it's only possible that client side ends the watch. + // e.g. cancel the context, close the client. + // If this watch chan is broken and context isn't cancelled, other goroutines will still hang. + // We should notify the main thread that this goroutine has exited. + close(watchClosedCh) +} + +// processEvent processes events from etcd watcher and sends results to resultChan. +func (wc *watchChan) processEvent(wg *sync.WaitGroup) { + defer wg.Done() + + for { + select { + case e := <-wc.incomingEventChan: + res := wc.transform(e) + if res == nil { + continue + } + if len(wc.resultChan) == outgoingBufSize { + klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize) + } + // If user couldn't receive results fast enough, we also block incoming events from watcher. + // Because storing events in local will cause more memory usage. + // The worst case would be closing the fast watcher. 
+ select { + case wc.resultChan <- *res: + case <-wc.ctx.Done(): + return + } + case <-wc.ctx.Done(): + return + } + } +} + +func (wc *watchChan) filter(obj runtime.Object) bool { + if wc.internalPred.Empty() { + return true + } + matched, err := wc.internalPred.Matches(obj) + return err == nil && matched +} + +func (wc *watchChan) acceptAll() bool { + return wc.internalPred.Empty() +} + +// transform transforms an event into a result for user if not filtered. +func (wc *watchChan) transform(e *event) (res *watch.Event) { + curObj, oldObj, err := wc.prepareObjs(e) + if err != nil { + klog.Errorf("failed to prepare current and previous objects: %v", err) + wc.sendError(err) + return nil + } + + switch { + case e.isProgressNotify: + if wc.watcher.newFunc == nil { + return nil + } + object := wc.watcher.newFunc() + if err := wc.watcher.versioner.UpdateObject(object, uint64(e.rev)); err != nil { + klog.Errorf("failed to propagate object version: %v", err) + return nil + } + res = &watch.Event{ + Type: watch.Bookmark, + Object: object, + } + case e.isDeleted: + if !wc.filter(oldObj) { + return nil + } + res = &watch.Event{ + Type: watch.Deleted, + Object: oldObj, + } + case e.isCreated: + if !wc.filter(curObj) { + return nil + } + res = &watch.Event{ + Type: watch.Added, + Object: curObj, + } + default: + if wc.acceptAll() { + res = &watch.Event{ + Type: watch.Modified, + Object: curObj, + } + return res + } + curObjPasses := wc.filter(curObj) + oldObjPasses := wc.filter(oldObj) + switch { + case curObjPasses && oldObjPasses: + res = &watch.Event{ + Type: watch.Modified, + Object: curObj, + } + case curObjPasses && !oldObjPasses: + res = &watch.Event{ + Type: watch.Added, + Object: curObj, + } + case !curObjPasses && oldObjPasses: + res = &watch.Event{ + Type: watch.Deleted, + Object: oldObj, + } + } + } + return res +} + +func transformErrorToEvent(err error) *watch.Event { + err = interpretWatchError(err) + if _, ok := err.(apierrors.APIStatus); !ok { + err = apierrors.NewInternalError(err) + } + status := err.(apierrors.APIStatus).Status() + return &watch.Event{ + Type: watch.Error, + Object: &status, + } +} + +func (wc *watchChan) sendError(err error) { + select { + case wc.errChan <- err: + case <-wc.ctx.Done(): + } +} + +func (wc *watchChan) sendEvent(e *event) { + if len(wc.incomingEventChan) == incomingBufSize { + klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow decoding, user not receiving fast, or other processing logic", "incomingEvents", incomingBufSize) + } + select { + case wc.incomingEventChan <- e: + case <-wc.ctx.Done(): + } +} + +func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtime.Object, err error) { + if e.isProgressNotify { + // progressNotify events doesn't contain neither current nor previous object version, + return nil, nil, nil + } + + if !e.isDeleted { + data, _, err := wc.watcher.transformer.TransformFromStorage(e.value, authenticatedDataString(e.key)) + if err != nil { + return nil, nil, err + } + curObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev) + if err != nil { + return nil, nil, err + } + } + // We need to decode prevValue, only if this is deletion event or + // the underlying filter doesn't accept all objects (otherwise we + // know that the filter for previous object will return true and + // we need the object only to compute whether it was filtered out + // before). 
+ if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) { + data, _, err := wc.watcher.transformer.TransformFromStorage(e.prevValue, authenticatedDataString(e.key)) + if err != nil { + return nil, nil, err + } + // Note that this sends the *old* object with the etcd revision for the time at + // which it gets deleted. + oldObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev) + if err != nil { + return nil, nil, err + } + } + return curObj, oldObj, nil +} + +func decodeObj(codec runtime.Codec, versioner storage.Versioner, data []byte, rev int64) (_ runtime.Object, err error) { + obj, err := runtime.Decode(codec, []byte(data)) + if err != nil { + if fatalOnDecodeError { + // catch watch decode error iff we caused it on + // purpose during a unit test + defer testingDeferOnDecodeError() + // we are running in a test environment and thus an + // error here is due to a coder mistake if the defer + // does not catch it + panic(err) + } + return nil, err + } + // ensure resource version is set on the object we load from etcd + if err := versioner.UpdateObject(obj, uint64(rev)); err != nil { + return nil, fmt.Errorf("failure to version api object (%d) %#v: %v", rev, obj, err) + } + return obj, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go new file mode 100644 index 0000000000..01f9132f55 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -0,0 +1,275 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// Versioner abstracts setting and retrieving metadata fields from database response +// onto the object ot list. It is required to maintain storage invariants - updating an +// object twice with the same data except for the ResourceVersion and SelfLink must be +// a no-op. A resourceVersion of type uint64 is a 'raw' resourceVersion, +// intended to be sent directly to or from the backend. A resourceVersion of +// type string is a 'safe' resourceVersion, intended for consumption by users. +type Versioner interface { + // UpdateObject sets storage metadata into an API object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateObject(obj runtime.Object, resourceVersion uint64) error + // UpdateList sets the resource version into an API list object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata from + // database. continueValue is optional and indicates that more results are available if the client + // passes that value to the server in a subsequent call. 
remainingItemCount indicates the number
+	// of remaining objects if the list is partial. The remainingItemCount field is omitted during
+	// serialization if it is set to nil.
+	UpdateList(obj runtime.Object, resourceVersion uint64, continueValue string, remainingItemCount *int64) error
+	// PrepareObjectForStorage should set SelfLink and ResourceVersion to the empty value. Should
+	// return an error if the specified object cannot be updated.
+	PrepareObjectForStorage(obj runtime.Object) error
+	// ObjectResourceVersion returns the resource version (for persistence) of the specified object.
+	// Should return an error if the specified object does not have a persistable version.
+	ObjectResourceVersion(obj runtime.Object) (uint64, error)
+
+	// ParseResourceVersion takes a resource version argument and
+	// converts it to the storage backend. For watch we should pass to helper.Watch().
+	// Because resourceVersion is an opaque value, the default watch
+	// behavior for non-zero watch is to watch the next value (if you pass
+	// "1", you will see updates from "2" onwards).
+	ParseResourceVersion(resourceVersion string) (uint64, error)
+}
+
+// ResponseMeta contains information about the database metadata that is associated with
+// an object. It abstracts the actual underlying objects to prevent coupling with concrete
+// database and to improve testability.
+type ResponseMeta struct {
+	// TTL is the time to live of the node that contained the returned object. It may be
+	// zero or negative in some cases (objects may be expired after the requested
+	// expiration time due to server lag).
+	TTL int64
+	// The resource version of the node that contained the returned object.
+	ResourceVersion uint64
+}
+
+// IndexerFunc is a function that for a given object computes
+// <value> for a particular <index>.
+type IndexerFunc func(obj runtime.Object) string
+
+// IndexerFuncs is a mapping from <index name> to function that
+// for a given object computes <value>.
+type IndexerFuncs map[string]IndexerFunc
+
+// Everything accepts all objects.
+var Everything = SelectionPredicate{
+	Label: labels.Everything(),
+	Field: fields.Everything(),
+}
+
+// MatchValue defines a pair (<index name>, <value>).
+type MatchValue struct {
+	IndexName string
+	Value     string
+}
+
+// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update
+// that is guaranteed to succeed.
+// See the comment for GuaranteedUpdate for more details.
+type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error)
+
+// ValidateObjectFunc is a function to act on a given object. An error may be returned
+// if the hook cannot be completed. The function may NOT transform the provided
+// object.
+type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error
+
+// ValidateAllObjectFunc is an "admit everything" instance of ValidateObjectFunc.
+func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error {
+	return nil
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID `json:"uid,omitempty"`
+	// Specifies the target ResourceVersion
+	// +optional
+	ResourceVersion *string `json:"resourceVersion,omitempty"`
+}
+
+// NewUIDPreconditions returns a Preconditions with UID set.
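+// An illustrative (non-normative) use together with Interface.Delete, which then only
+// deletes the object if its UID still matches ("accessor" below stands for any
+// metav1.Object metadata accessor):
+//
+//	pre := NewUIDPreconditions(string(accessor.GetUID()))
+//	err := store.Delete(ctx, key, out, pre, ValidateAllObjectFunc)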
+func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +func (p *Preconditions) Check(key string, obj runtime.Object) error { + if p == nil { + return nil + } + objMeta, err := meta.Accessor(obj) + if err != nil { + return NewInternalErrorf( + "can't enforce preconditions %v on un-introspectable object %v, got error: %v", + *p, + obj, + err) + } + if p.UID != nil && *p.UID != objMeta.GetUID() { + err := fmt.Sprintf( + "Precondition failed: UID in precondition: %v, UID in object meta: %v", + *p.UID, + objMeta.GetUID()) + return NewInvalidObjError(key, err) + } + if p.ResourceVersion != nil && *p.ResourceVersion != objMeta.GetResourceVersion() { + err := fmt.Sprintf( + "Precondition failed: ResourceVersion in precondition: %v, ResourceVersion in object meta: %v", + *p.ResourceVersion, + objMeta.GetResourceVersion()) + return NewInvalidObjError(key, err) + } + return nil +} + +// Interface offers a common interface for object marshaling/unmarshaling operations and +// hides all the storage-related operations behind it. +type Interface interface { + // Returns Versioner associated with this interface. + Versioner() Versioner + + // Create adds a new object at a key unless it already exists. 'ttl' is time-to-live + // in seconds (0 means forever). If no error is returned and out is not nil, out will be + // set to the read value from database. + Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error + + // Delete removes the specified key and returns the value that existed at that spot. + // If key didn't exist, it will return NotFound storage error. + Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions, validateDeletion ValidateObjectFunc) error + + // Watch begins watching the specified key. Events are decoded into API objects, + // and any items selected by 'p' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching, + // which should be the current resourceVersion, and no longer rv+1 + // (e.g. reconnecting without missing any updates). + // If resource version is "0", this interface will get current object at given key + // and send it in an "ADDED" event, before watch starts. + Watch(ctx context.Context, key string, opts ListOptions) (watch.Interface, error) + + // WatchList begins watching the specified key's items. Items are decoded into API + // objects and any item selected by 'p' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching, + // which should be the current resourceVersion, and no longer rv+1 + // (e.g. reconnecting without missing any updates). + // If resource version is "0", this interface will list current objects directory defined by key + // and send them in "ADDED" events, before watch starts. + WatchList(ctx context.Context, key string, opts ListOptions) (watch.Interface, error) + + // Get unmarshals json found at key into objPtr. On a not found error, will either + // return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'. + // Treats empty responses and nil response nodes exactly like a not found error. + // The returned contents may be delayed, but it is guaranteed that they will + // match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'. 
+ Get(ctx context.Context, key string, opts GetOptions, objPtr runtime.Object) error + + // GetToList unmarshals json found at key and opaque it into *List api object + // (an object that satisfies the runtime.IsList definition). + // The returned contents may be delayed, but it is guaranteed that they will + // match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'. + GetToList(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error + + // List unmarshalls jsons found at directory defined by key and opaque them + // into *List api object (an object that satisfies runtime.IsList definition). + // The returned contents may be delayed, but it is guaranteed that they will + // match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'. + List(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error + + // GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType') + // retrying the update until success if there is index conflict. + // Note that object passed to tryUpdate may change across invocations of tryUpdate() if + // other writers are simultaneously updating it, so tryUpdate() needs to take into account + // the current contents of the object when deciding how the update object should look. + // If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false + // or zero value in 'ptrToType' parameter otherwise. + // If the object to update has the same value as previous, it won't do any update + // but will return the object in 'ptrToType' parameter. + // If 'suggestion' is non-nil, it can be used as a suggestion about the current version + // of the object to avoid read operation from storage to get it. However, the + // implementations have to retry in case suggestion is stale. + // + // Example: + // + // s := /* implementation of Interface */ + // err := s.GuaranteedUpdate( + // "myKey", &MyType{}, true, + // func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + // // Before each invocation of the user defined function, "input" is reset to + // // current contents for "myKey" in database. + // curr := input.(*MyType) // Guaranteed to succeed. + // + // // Make the modification + // curr.Counter++ + // + // // Return the modified object - return an error to stop iterating. Return + // // a uint64 to alter the TTL on the object, or nil to keep it the same value. + // return cur, nil, nil + // }, + // ) + GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + precondtions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error + + // Count returns number of different entries under the key (generally being path prefix). + Count(key string) (int64, error) +} + +// GetOptions provides the options that may be provided for storage get operations. +type GetOptions struct { + // IgnoreNotFound determines what is returned if the requested object is not found. If + // true, a zero object is returned. If false, an error is returned. + IgnoreNotFound bool + // ResourceVersion provides a resource version constraint to apply to the get operation + // as a "not older than" constraint: the result contains data at least as new as the provided + // ResourceVersion. The newest available data is preferred, but any data not older than this + // ResourceVersion may be served. + ResourceVersion string +} + +// ListOptions provides the options that may be provided for storage list operations. 
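+// An illustrative (non-normative) example of a paginated list that tolerates
+// slightly stale data:
+//
+//	opts := ListOptions{
+//		ResourceVersion: "0",
+//		Predicate: SelectionPredicate{
+//			Label: labels.Everything(),
+//			Field: fields.Everything(),
+//			Limit: 500,
+//		},
+//	}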
+type ListOptions struct { + // ResourceVersion provides a resource version constraint to apply to the list operation + // as a "not older than" constraint: the result contains data at least as new as the provided + // ResourceVersion. The newest available data is preferred, but any data not older than this + // ResourceVersion may be served. + ResourceVersion string + // ResourceVersionMatch provides the rule for how the resource version constraint applies. If set + // to the default value "" the legacy resource version semantic apply. + ResourceVersionMatch metav1.ResourceVersionMatch + // Predicate provides the selection rules for the list operation. + Predicate SelectionPredicate + // ProgressNotify determines whether storage-originated bookmark (progress notify) events should + // be delivered to the users. The option is ignored for non-watch requests. + ProgressNotify bool +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/names/generate.go b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go new file mode 100644 index 0000000000..aad9a07f9a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go @@ -0,0 +1,54 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import ( + "fmt" + + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to the + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters) +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go b/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go new file mode 100644 index 0000000000..7370518e39 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go @@ -0,0 +1,159 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// AttrFunc returns label and field sets and the uninitialized flag for List or Watch to match. +// In any failure to parse given object, it returns error. +type AttrFunc func(obj runtime.Object) (labels.Set, fields.Set, error) + +// FieldMutationFunc allows the mutation of the field selection fields. It is mutating to +// avoid the extra allocation on this common path +type FieldMutationFunc func(obj runtime.Object, fieldSet fields.Set) error + +func DefaultClusterScopedAttr(obj runtime.Object) (labels.Set, fields.Set, error) { + metadata, err := meta.Accessor(obj) + if err != nil { + return nil, nil, err + } + fieldSet := fields.Set{ + "metadata.name": metadata.GetName(), + } + + return labels.Set(metadata.GetLabels()), fieldSet, nil +} + +func DefaultNamespaceScopedAttr(obj runtime.Object) (labels.Set, fields.Set, error) { + metadata, err := meta.Accessor(obj) + if err != nil { + return nil, nil, err + } + fieldSet := fields.Set{ + "metadata.name": metadata.GetName(), + "metadata.namespace": metadata.GetNamespace(), + } + + return labels.Set(metadata.GetLabels()), fieldSet, nil +} + +func (f AttrFunc) WithFieldMutation(fieldMutator FieldMutationFunc) AttrFunc { + return func(obj runtime.Object) (labels.Set, fields.Set, error) { + labelSet, fieldSet, err := f(obj) + if err != nil { + return nil, nil, err + } + if err := fieldMutator(obj, fieldSet); err != nil { + return nil, nil, err + } + return labelSet, fieldSet, nil + } +} + +// SelectionPredicate is used to represent the way to select objects from api storage. +type SelectionPredicate struct { + Label labels.Selector + Field fields.Selector + GetAttrs AttrFunc + IndexLabels []string + IndexFields []string + Limit int64 + Continue string + AllowWatchBookmarks bool +} + +// Matches returns true if the given object's labels and fields (as +// returned by s.GetAttrs) match s.Label and s.Field. An error is +// returned if s.GetAttrs fails. +func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) { + if s.Empty() { + return true, nil + } + labels, fields, err := s.GetAttrs(obj) + if err != nil { + return false, err + } + matched := s.Label.Matches(labels) + if matched && s.Field != nil { + matched = matched && s.Field.Matches(fields) + } + return matched, nil +} + +// MatchesObjectAttributes returns true if the given labels and fields +// match s.Label and s.Field. +func (s *SelectionPredicate) MatchesObjectAttributes(l labels.Set, f fields.Set) bool { + if s.Label.Empty() && s.Field.Empty() { + return true + } + matched := s.Label.Matches(l) + if matched && s.Field != nil { + matched = (matched && s.Field.Matches(f)) + } + return matched +} + +// MatchesSingle will return (name, true) if and only if s.Field matches on the object's +// name. 
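+// For example, a predicate whose Field selector requires "metadata.name=foo" yields
+// ("foo", true), while a predicate carrying a Continue token always yields ("", false).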
+func (s *SelectionPredicate) MatchesSingle() (string, bool) {
+	if len(s.Continue) > 0 {
+		return "", false
+	}
+	// TODO: should be namespace.name
+	if name, ok := s.Field.RequiresExactMatch("metadata.name"); ok {
+		return name, true
+	}
+	return "", false
+}
+
+// Empty returns true if the predicate performs no filtering.
+func (s *SelectionPredicate) Empty() bool {
+	return s.Label.Empty() && s.Field.Empty()
+}
+
+// For any index defined by IndexFields, if a matcher can match only (a subset)
+// of objects that return <value> for a given index, a pair (<index name>, <value>)
+// will be returned.
+func (s *SelectionPredicate) MatcherIndex() []MatchValue {
+	var result []MatchValue
+	for _, field := range s.IndexFields {
+		if value, ok := s.Field.RequiresExactMatch(field); ok {
+			result = append(result, MatchValue{IndexName: FieldIndex(field), Value: value})
+		}
+	}
+	for _, label := range s.IndexLabels {
+		if value, ok := s.Label.RequiresExactMatch(label); ok {
+			result = append(result, MatchValue{IndexName: LabelIndex(label), Value: value})
+		}
+	}
+	return result
+}
+
+// LabelIndex adds a prefix for a label index.
+func LabelIndex(label string) string {
+	return "l:" + label
+}
+
+// FieldIndex adds a prefix for a field index.
+func FieldIndex(field string) string {
+	return "f:" + field
+}
diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS
new file mode 100644
index 0000000000..dbef228c1a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- lavalamp
+- smarterclayton
+- wojtek-t
+- timothysc
+- hongchaodeng
diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go
new file mode 100644
index 0000000000..af94efcea8
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storagebackend
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apiserver/pkg/server/egressselector"
+	"k8s.io/apiserver/pkg/storage/value"
+)
+
+const (
+	StorageTypeUnset = ""
+	StorageTypeETCD3 = "etcd3"
+
+	DefaultCompactInterval      = 5 * time.Minute
+	DefaultDBMetricPollInterval = 30 * time.Second
+	DefaultHealthcheckTimeout   = 2 * time.Second
+)
+
+// TransportConfig holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
+type TransportConfig struct {
+	// ServerList is the list of storage servers to connect with.
+	ServerList []string
+	// TLS credentials
+	KeyFile       string
+	CertFile      string
+	TrustedCAFile string
+	// function to determine the egress dialer. (i.e. konnectivity server dialer)
+	EgressLookup egressselector.Lookup
+}
+
+// Config is configuration for creating a storage backend.
+type Config struct {
+	// Type defines the type of storage backend. Default ("") is "etcd3".
+ Type string + // Prefix is the prefix to all keys passed to storage.Interface methods. + Prefix string + // Transport holds all connection related info, i.e. equal TransportConfig means equal servers we talk to. + Transport TransportConfig + // Paging indicates whether the server implementation should allow paging (if it is + // supported). This is generally configured by feature gating, or by a specific + // resource type not wishing to allow paging, and is not intended for end users to + // set. + Paging bool + + Codec runtime.Codec + // EncodeVersioner is the same groupVersioner used to build the + // storage encoder. Given a list of kinds the input object might belong + // to, the EncodeVersioner outputs the gvk the object will be + // converted to before persisted in etcd. + EncodeVersioner runtime.GroupVersioner + // Transformer allows the value to be transformed prior to persisting into etcd. + Transformer value.Transformer + + // CompactionInterval is an interval of requesting compaction from apiserver. + // If the value is 0, no compaction will be issued. + CompactionInterval time.Duration + // CountMetricPollPeriod specifies how often should count metric be updated + CountMetricPollPeriod time.Duration + // DBMetricPollInterval specifies how often should storage backend metric be updated. + DBMetricPollInterval time.Duration + // HealthcheckTimeout specifies the timeout used when checking health + HealthcheckTimeout time.Duration +} + +func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { + return &Config{ + Paging: true, + Prefix: prefix, + Codec: codec, + CompactionInterval: DefaultCompactInterval, + DBMetricPollInterval: DefaultDBMetricPollInterval, + HealthcheckTimeout: DefaultHealthcheckTimeout, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go new file mode 100644 index 0000000000..9a1618df8d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -0,0 +1,291 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package factory + +import ( + "context" + "fmt" + "net" + "net/url" + "path" + "sync" + "sync/atomic" + "time" + + grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/pkg/transport" + "google.golang.org/grpc" + + "k8s.io/apimachinery/pkg/runtime" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/server/egressselector" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/etcd3" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/value" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +const ( + // The short keepalive timeout and interval have been chosen to aggressively + // detect a failed etcd server without introducing much overhead. 
+ keepaliveTime = 30 * time.Second + keepaliveTimeout = 10 * time.Second + + // dialTimeout is the timeout for failing to establish a connection. + // It is set to 20 seconds as times shorter than that will cause TLS connections to fail + // on heavily loaded arm64 CPUs (issue #64649) + dialTimeout = 20 * time.Second + + dbMetricsMonitorJitter = 0.5 +) + +func init() { + // grpcprom auto-registers (via an init function) their client metrics, since we are opting out of + // using the global prometheus registry and using our own wrapped global registry, + // we need to explicitly register these metrics to our global registry here. + // For reference: https://github.com/kubernetes/kubernetes/pull/81387 + legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics) + dbMetricsMonitors = make(map[string]struct{}) +} + +func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) { + // constructing the etcd v3 client blocks and times out if etcd is not available. + // retry in a loop in the background until we successfully create the client, storing the client or error encountered + + clientValue := &atomic.Value{} + + clientErrMsg := &atomic.Value{} + clientErrMsg.Store("etcd client connection not yet established") + + go wait.PollUntil(time.Second, func() (bool, error) { + client, err := newETCD3Client(c.Transport) + if err != nil { + clientErrMsg.Store(err.Error()) + return false, nil + } + clientValue.Store(client) + clientErrMsg.Store("") + return true, nil + }, wait.NeverStop) + + return func() error { + if errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 { + return fmt.Errorf(errMsg) + } + client := clientValue.Load().(*clientv3.Client) + healthcheckTimeout := storagebackend.DefaultHealthcheckTimeout + if c.HealthcheckTimeout != time.Duration(0) { + healthcheckTimeout = c.HealthcheckTimeout + } + ctx, cancel := context.WithTimeout(context.Background(), healthcheckTimeout) + defer cancel() + // See https://github.com/etcd-io/etcd/blob/c57f8b3af865d1b531b979889c602ba14377420e/etcdctl/ctlv3/command/ep_command.go#L118 + _, err := client.Get(ctx, path.Join("/", c.Prefix, "health")) + if err == nil { + return nil + } + return fmt.Errorf("error getting data from etcd: %v", err) + }, nil +} + +func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) { + tlsInfo := transport.TLSInfo{ + CertFile: c.CertFile, + KeyFile: c.KeyFile, + TrustedCAFile: c.TrustedCAFile, + } + tlsConfig, err := tlsInfo.ClientConfig() + if err != nil { + return nil, err + } + // NOTE: Client relies on nil tlsConfig + // for non-secure connections, update the implicit variable + if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 { + tlsConfig = nil + } + networkContext := egressselector.Etcd.AsNetworkContext() + var egressDialer utilnet.DialFunc + if c.EgressLookup != nil { + egressDialer, err = c.EgressLookup(networkContext) + if err != nil { + return nil, err + } + } + dialOptions := []grpc.DialOption{ + grpc.WithBlock(), // block until the underlying connection is up + grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor), + } + if egressDialer != nil { + dialer := func(ctx context.Context, addr string) (net.Conn, error) { + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + return egressDialer(ctx, "tcp", u.Host) + } + dialOptions = append(dialOptions, grpc.WithContextDialer(dialer)) + } + cfg := clientv3.Config{ + DialTimeout: dialTimeout, + DialKeepAliveTime: 
keepaliveTime, + DialKeepAliveTimeout: keepaliveTimeout, + DialOptions: dialOptions, + Endpoints: c.ServerList, + TLS: tlsConfig, + } + + return clientv3.New(cfg) +} + +type runningCompactor struct { + interval time.Duration + cancel context.CancelFunc + client *clientv3.Client + refs int +} + +var ( + // compactorsMu guards access to compactors map + compactorsMu sync.Mutex + compactors = map[string]*runningCompactor{} + // dbMetricsMonitorsMu guards access to dbMetricsMonitors map + dbMetricsMonitorsMu sync.Mutex + dbMetricsMonitors map[string]struct{} +) + +// startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the +// compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called, +// the compactor is stopped. +func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) { + compactorsMu.Lock() + defer compactorsMu.Unlock() + + key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile} + if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval { + compactorClient, err := newETCD3Client(c) + if err != nil { + return nil, err + } + + if foundBefore { + // replace compactor + compactor.cancel() + compactor.client.Close() + } else { + // start new compactor + compactor = &runningCompactor{} + compactors[key] = compactor + } + + ctx, cancel := context.WithCancel(context.Background()) + + compactor.interval = interval + compactor.cancel = cancel + compactor.client = compactorClient + + etcd3.StartCompactor(ctx, compactorClient, interval) + } + + compactors[key].refs++ + + return func() { + compactorsMu.Lock() + defer compactorsMu.Unlock() + + compactor := compactors[key] + compactor.refs-- + if compactor.refs == 0 { + compactor.cancel() + compactor.client.Close() + delete(compactors, key) + } + }, nil +} + +func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { + stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval) + if err != nil { + return nil, nil, err + } + + client, err := newETCD3Client(c.Transport) + if err != nil { + stopCompactor() + return nil, nil, err + } + + stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval) + if err != nil { + return nil, nil, err + } + + var once sync.Once + destroyFunc := func() { + // we know that storage destroy funcs are called multiple times (due to reuse in subresources). + // Hence, we only destroy once. + // TODO: fix duplicated storage destroy calls higher level + once.Do(func() { + stopCompactor() + stopDBSizeMonitor() + client.Close() + }) + } + transformer := c.Transformer + if transformer == nil { + transformer = value.IdentityTransformer + } + return etcd3.New(client, c.Codec, newFunc, c.Prefix, transformer, c.Paging), destroyFunc, nil +} + +// startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the +// corresponding metric etcd_db_total_size_in_bytes for each etcd server endpoint. 
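+// Monitoring is skipped when interval is zero; the returned function cancels the
+// per-endpoint polling goroutines.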
+func startDBSizeMonitorPerEndpoint(client *clientv3.Client, interval time.Duration) (func(), error) { + if interval == 0 { + return func() {}, nil + } + dbMetricsMonitorsMu.Lock() + defer dbMetricsMonitorsMu.Unlock() + + ctx, cancel := context.WithCancel(context.Background()) + for _, ep := range client.Endpoints() { + if _, found := dbMetricsMonitors[ep]; found { + continue + } + dbMetricsMonitors[ep] = struct{}{} + endpoint := ep + klog.V(4).Infof("Start monitoring storage db size metric for endpoint %s with polling interval %v", endpoint, interval) + go wait.JitterUntilWithContext(ctx, func(context.Context) { + epStatus, err := client.Maintenance.Status(ctx, endpoint) + if err != nil { + klog.V(4).Infof("Failed to get storage db size for ep %s: %v", endpoint, err) + metrics.UpdateEtcdDbSize(endpoint, -1) + } else { + metrics.UpdateEtcdDbSize(endpoint, epStatus.DbSize) + } + }, interval, dbMetricsMonitorJitter, true) + } + + return func() { + cancel() + }, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go new file mode 100644 index 0000000000..1e8a8cdb0f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package factory + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" +) + +// DestroyFunc is to destroy any resources used by the storage returned in Create() together. +type DestroyFunc func() + +// Create creates a storage backend based on given config. +func Create(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { + switch c.Type { + case "etcd2": + return nil, nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type) + case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: + return newETCD3Storage(c, newFunc) + default: + return nil, nil, fmt.Errorf("unknown storage type: %s", c.Type) + } +} + +// CreateHealthCheck creates a healthcheck function based on given config. +func CreateHealthCheck(c storagebackend.Config) (func() error, error) { + switch c.Type { + case "etcd2": + return nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type) + case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: + return newETCD3HealthCheck(c) + default: + return nil, fmt.Errorf("unknown storage type: %s", c.Type) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/util.go b/vendor/k8s.io/apiserver/pkg/storage/util.go new file mode 100644 index 0000000000..9da8d9713c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/util.go @@ -0,0 +1,81 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "sync/atomic" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/runtime" +) + +type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) + +// SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc +func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { + return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { + out, err := fn(input) + return out, nil, err + } +} + +func EverythingFunc(runtime.Object) bool { + return true +} + +func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + meta.GetNamespace() + "/" + name, nil +} + +func NoNamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + name, nil +} + +// HighWaterMark is a thread-safe object for tracking the maximum value seen +// for some quantity. +type HighWaterMark int64 + +// Update returns true if and only if 'current' is the highest value ever seen. +func (hwm *HighWaterMark) Update(current int64) bool { + for { + old := atomic.LoadInt64((*int64)(hwm)) + if current <= old { + return false + } + if atomic.CompareAndSwapInt64((*int64)(hwm), old, current) { + return true + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go new file mode 100644 index 0000000000..daa82f711f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go @@ -0,0 +1,152 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package aes transforms values for storage at rest using AES-GCM. +package aes + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "fmt" + "io" + + "k8s.io/apiserver/pkg/storage/value" +) + +// gcm implements AEAD encryption of the provided values given a cipher.Block algorithm. +// The authenticated data provided as part of the value.Context method must match when the same +// value is set to and loaded from storage. 
In order to ensure that values cannot be copied by +// an attacker from a location under their control, use characteristics of the storage location +// (such as the etcd key) as part of the authenticated data. +// +// Because this mode requires a generated IV and IV reuse is a known weakness of AES-GCM, keys +// must be rotated before a birthday attack becomes feasible. NIST SP 800-38D +// (http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf) recommends using the same +// key with random 96-bit nonces (the default nonce length) no more than 2^32 times, and +// therefore transformers using this implementation *must* ensure they allow for frequent key +// rotation. Future work should include investigation of AES-GCM-SIV as an alternative to +// random nonces. +type gcm struct { + block cipher.Block +} + +// NewGCMTransformer takes the given block cipher and performs encryption and decryption on the given +// data. +func NewGCMTransformer(block cipher.Block) value.Transformer { + return &gcm{block: block} +} + +func (t *gcm) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + aead, err := cipher.NewGCM(t.block) + if err != nil { + return nil, false, err + } + nonceSize := aead.NonceSize() + if len(data) < nonceSize { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + result, err := aead.Open(nil, data[:nonceSize], data[nonceSize:], context.AuthenticatedData()) + return result, false, err +} + +func (t *gcm) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + aead, err := cipher.NewGCM(t.block) + if err != nil { + return nil, err + } + nonceSize := aead.NonceSize() + result := make([]byte, nonceSize+aead.Overhead()+len(data)) + n, err := rand.Read(result[:nonceSize]) + if err != nil { + return nil, err + } + if n != nonceSize { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + cipherText := aead.Seal(result[nonceSize:nonceSize], result[:nonceSize], data, context.AuthenticatedData()) + return result[:nonceSize+len(cipherText)], nil +} + +// cbc implements encryption at rest of the provided values given a cipher.Block algorithm. +type cbc struct { + block cipher.Block +} + +// NewCBCTransformer takes the given block cipher and performs encryption and decryption on the given +// data. 
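
To make the storage layout described above concrete, here is an illustrative standard-library sketch (not part of the vendored files) of the nonce-prefixed AES-GCM format the gcm transformer writes; the key and the authenticated-data string are invented for the example.

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 32) // hypothetical 256-bit DEK
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("secret value")
	aad := []byte("/registry/secrets/default/example") // e.g. the etcd key

	// Encrypt: the stored blob is nonce || ciphertext, mirroring TransformToStorage.
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	stored := aead.Seal(nonce, nonce, plaintext, aad)

	// Decrypt: split the nonce back off the front, mirroring TransformFromStorage.
	out, err := aead.Open(nil, stored[:aead.NonceSize()], stored[aead.NonceSize():], aad)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
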
+func NewCBCTransformer(block cipher.Block) value.Transformer { + return &cbc{block: block} +} + +var ( + errInvalidBlockSize = fmt.Errorf("the stored data is not a multiple of the block size") + errInvalidPKCS7Data = errors.New("invalid PKCS7 data (empty or not padded)") + errInvalidPKCS7Padding = errors.New("invalid padding on input") +) + +func (t *cbc) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + blockSize := aes.BlockSize + if len(data) < blockSize { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + iv := data[:blockSize] + data = data[blockSize:] + + if len(data)%blockSize != 0 { + return nil, false, errInvalidBlockSize + } + + result := make([]byte, len(data)) + copy(result, data) + mode := cipher.NewCBCDecrypter(t.block, iv) + mode.CryptBlocks(result, result) + + // remove and verify PKCS#7 padding for CBC + c := result[len(result)-1] + paddingSize := int(c) + size := len(result) - paddingSize + if paddingSize == 0 || paddingSize > len(result) { + return nil, false, errInvalidPKCS7Data + } + for i := 0; i < paddingSize; i++ { + if result[size+i] != c { + return nil, false, errInvalidPKCS7Padding + } + } + + return result[:size], false, nil +} + +func (t *cbc) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + blockSize := aes.BlockSize + paddingSize := blockSize - (len(data) % blockSize) + result := make([]byte, blockSize+len(data)+paddingSize) + iv := result[:blockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + copy(result[blockSize:], data) + + // add PKCS#7 padding for CBC + copy(result[blockSize+len(data):], bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)) + + mode := cipher.NewCBCEncrypter(t.block, iv) + mode.CryptBlocks(result[blockSize:], result[blockSize:]) + return result, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go new file mode 100644 index 0000000000..20ca3f6c92 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go @@ -0,0 +1,200 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envelope transforms values for storage at rest using a Envelope provider +package envelope + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "fmt" + "time" + + "k8s.io/apiserver/pkg/storage/value" + + lru "github.com/hashicorp/golang-lru" + "golang.org/x/crypto/cryptobyte" +) + +func init() { + value.RegisterMetrics() + registerMetrics() +} + +// Service allows encrypting and decrypting data using an external Key Management Service. +type Service interface { + // Decrypt a given bytearray to obtain the original data as bytes. + Decrypt(data []byte) ([]byte, error) + // Encrypt bytes to a ciphertext. 
+ Encrypt(data []byte) ([]byte, error) +} + +type envelopeTransformer struct { + envelopeService Service + + // transformers is a thread-safe LRU cache which caches decrypted DEKs indexed by their encrypted form. + transformers *lru.Cache + + // baseTransformerFunc creates a new transformer for encrypting the data with the DEK. + baseTransformerFunc func(cipher.Block) value.Transformer + + cacheSize int + cacheEnabled bool +} + +// NewEnvelopeTransformer returns a transformer which implements a KEK-DEK based envelope encryption scheme. +// It uses envelopeService to encrypt and decrypt DEKs. Respective DEKs (in encrypted form) are prepended to +// the data items they encrypt. A cache (of size cacheSize) is maintained to store the most recently +// used decrypted DEKs in memory. +func NewEnvelopeTransformer(envelopeService Service, cacheSize int, baseTransformerFunc func(cipher.Block) value.Transformer) (value.Transformer, error) { + var ( + cache *lru.Cache + err error + ) + + if cacheSize > 0 { + cache, err = lru.New(cacheSize) + if err != nil { + return nil, err + } + } + return &envelopeTransformer{ + envelopeService: envelopeService, + transformers: cache, + baseTransformerFunc: baseTransformerFunc, + cacheEnabled: cacheSize > 0, + cacheSize: cacheSize, + }, nil +} + +// TransformFromStorage decrypts data encrypted by this transformer using envelope encryption. +func (t *envelopeTransformer) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + recordArrival(fromStorageLabel, time.Now()) + + // Read the 16 bit length-of-DEK encoded at the start of the encrypted DEK. 16 bits can + // represent a maximum key length of 65536 bytes. We are using a 256 bit key, whose + // length cannot fit in 8 bits (1 byte). Thus, we use 16 bits (2 bytes) to store the length. + var encKey cryptobyte.String + s := cryptobyte.String(data) + if ok := s.ReadUint16LengthPrefixed(&encKey); !ok { + return nil, false, fmt.Errorf("invalid data encountered by envelope transformer: failed to read uint16 length prefixed data") + } + + encData := []byte(s) + + // Look up the decrypted DEK from cache or Envelope. + transformer := t.getTransformer(encKey) + if transformer == nil { + if t.cacheEnabled { + value.RecordCacheMiss() + } + key, err := t.envelopeService.Decrypt(encKey) + if err != nil { + // Do NOT wrap this err using fmt.Errorf() or similar functions + // because this gRPC status error has useful error code when + // record the metric. + return nil, false, err + } + + transformer, err = t.addTransformer(encKey, key) + if err != nil { + return nil, false, err + } + } + + return transformer.TransformFromStorage(encData, context) +} + +// TransformToStorage encrypts data to be written to disk using envelope encryption. +func (t *envelopeTransformer) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + recordArrival(toStorageLabel, time.Now()) + newKey, err := generateKey(32) + if err != nil { + return nil, err + } + + encKey, err := t.envelopeService.Encrypt(newKey) + if err != nil { + // Do NOT wrap this err using fmt.Errorf() or similar functions + // because this gRPC status error has useful error code when + // record the metric. + return nil, err + } + + transformer, err := t.addTransformer(encKey, newKey) + if err != nil { + return nil, err + } + + result, err := transformer.TransformToStorage(data, context) + if err != nil { + return nil, err + } + // Append the length of the encrypted DEK as the first 2 bytes. 
+ b := cryptobyte.NewBuilder(nil) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(encKey)) + }) + b.AddBytes(result) + + return b.Bytes() +} + +var _ value.Transformer = &envelopeTransformer{} + +// addTransformer inserts a new transformer to the Envelope cache of DEKs for future reads. +func (t *envelopeTransformer) addTransformer(encKey []byte, key []byte) (value.Transformer, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + transformer := t.baseTransformerFunc(block) + // Use base64 of encKey as the key into the cache because hashicorp/golang-lru + // cannot hash []uint8. + if t.cacheEnabled { + t.transformers.Add(base64.StdEncoding.EncodeToString(encKey), transformer) + dekCacheFillPercent.Set(float64(t.transformers.Len()) / float64(t.cacheSize)) + } + return transformer, nil +} + +// getTransformer fetches the transformer corresponding to encKey from cache, if it exists. +func (t *envelopeTransformer) getTransformer(encKey []byte) value.Transformer { + if !t.cacheEnabled { + return nil + } + + _transformer, found := t.transformers.Get(base64.StdEncoding.EncodeToString(encKey)) + if found { + return _transformer.(value.Transformer) + } + return nil +} + +// generateKey generates a random key using system randomness. +func generateKey(length int) (key []byte, err error) { + defer func(start time.Time) { + value.RecordDataKeyGeneration(start, err) + }(time.Now()) + key = make([]byte, length) + if _, err = rand.Read(key); err != nil { + return nil, err + } + + return key, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go new file mode 100644 index 0000000000..7aa5d232f8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go @@ -0,0 +1,181 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envelope transforms values for storage at rest using a Envelope provider +package envelope + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + "sync" + "time" + + "k8s.io/klog/v2" + + "google.golang.org/grpc" + + kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1" +) + +const ( + // Now only supported unix domain socket. + unixProtocol = "unix" + + // Current version for the protocol interface definition. + kmsapiVersion = "v1beta1" + + versionErrorf = "KMS provider api version %s is not supported, only %s is supported now" +) + +// The gRPC implementation for envelope.Service. +type gRPCService struct { + kmsClient kmsapi.KeyManagementServiceClient + connection *grpc.ClientConn + callTimeout time.Duration + mux sync.RWMutex + versionChecked bool +} + +// NewGRPCService returns an envelope.Service which use gRPC to communicate the remote KMS provider. 
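
An illustrative sketch (not part of the vendored files) of the envelope framing used above: a uint16 length-prefixed encrypted DEK followed by the DEK-encrypted payload, written and read back with golang.org/x/crypto/cryptobyte. The byte values are invented for the example.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	encKey := []byte("encrypted-DEK-from-KMS") // hypothetical KMS output
	payload := []byte("data encrypted with the DEK")

	// Write: 2-byte length prefix, then the encrypted DEK, then the payload.
	b := cryptobyte.NewBuilder(nil)
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
		b.AddBytes(encKey)
	})
	b.AddBytes(payload)
	stored, err := b.Bytes()
	if err != nil {
		panic(err)
	}

	// Read: peel the length-prefixed DEK off the front, as TransformFromStorage does.
	var gotKey cryptobyte.String
	s := cryptobyte.String(stored)
	if !s.ReadUint16LengthPrefixed(&gotKey) {
		panic("malformed envelope data")
	}
	fmt.Printf("DEK: %q payload: %q\n", []byte(gotKey), []byte(s))
}
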
+func NewGRPCService(endpoint string, callTimeout time.Duration) (Service, error) { + klog.V(4).Infof("Configure KMS provider with endpoint: %s", endpoint) + + addr, err := parseEndpoint(endpoint) + if err != nil { + return nil, err + } + + s := &gRPCService{callTimeout: callTimeout} + s.connection, err = grpc.Dial( + addr, + grpc.WithInsecure(), + grpc.WithUnaryInterceptor(s.interceptor), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithContextDialer( + func(context.Context, string) (net.Conn, error) { + // Ignoring addr and timeout arguments: + // addr - comes from the closure + c, err := net.DialUnix(unixProtocol, nil, &net.UnixAddr{Name: addr}) + if err != nil { + klog.Errorf("failed to create connection to unix socket: %s, error: %v", addr, err) + } else { + klog.V(4).Infof("Successfully dialed Unix socket %v", addr) + } + return c, err + })) + + if err != nil { + return nil, fmt.Errorf("failed to create connection to %s, error: %v", endpoint, err) + } + + s.kmsClient = kmsapi.NewKeyManagementServiceClient(s.connection) + return s, nil +} + +// Parse the endpoint to extract schema, host or path. +func parseEndpoint(endpoint string) (string, error) { + if len(endpoint) == 0 { + return "", fmt.Errorf("remote KMS provider can't use empty string as endpoint") + } + + u, err := url.Parse(endpoint) + if err != nil { + return "", fmt.Errorf("invalid endpoint %q for remote KMS provider, error: %v", endpoint, err) + } + + if u.Scheme != unixProtocol { + return "", fmt.Errorf("unsupported scheme %q for remote KMS provider", u.Scheme) + } + + // Linux abstract namespace socket - no physical file required + // Warning: Linux Abstract sockets have not concept of ACL (unlike traditional file based sockets). + // However, Linux Abstract sockets are subject to Linux networking namespace, so will only be accessible to + // containers within the same pod (unless host networking is used). + if strings.HasPrefix(u.Path, "/@") { + return strings.TrimPrefix(u.Path, "/"), nil + } + + return u.Path, nil +} + +func (g *gRPCService) checkAPIVersion(ctx context.Context) error { + g.mux.Lock() + defer g.mux.Unlock() + + if g.versionChecked { + return nil + } + + request := &kmsapi.VersionRequest{Version: kmsapiVersion} + response, err := g.kmsClient.Version(ctx, request) + if err != nil { + return fmt.Errorf("failed get version from remote KMS provider: %v", err) + } + if response.Version != kmsapiVersion { + return fmt.Errorf(versionErrorf, response.Version, kmsapiVersion) + } + g.versionChecked = true + + klog.V(4).Infof("Version of KMS provider is %s", response.Version) + return nil +} + +// Decrypt a given data string to obtain the original byte data. +func (g *gRPCService) Decrypt(cipher []byte) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) + defer cancel() + + request := &kmsapi.DecryptRequest{Cipher: cipher, Version: kmsapiVersion} + response, err := g.kmsClient.Decrypt(ctx, request) + if err != nil { + return nil, err + } + return response.Plain, nil +} + +// Encrypt bytes to a string ciphertext. 
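
As an illustrative aside (not part of the vendored files), the unix:// endpoint handling described above reduces to the following standard-library sketch for both a regular socket path and a Linux abstract-namespace socket; the socket paths are invented for the example.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func parse(endpoint string) (string, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return "", err
	}
	if u.Scheme != "unix" {
		return "", fmt.Errorf("unsupported scheme %q", u.Scheme)
	}
	// "/@name" denotes an abstract socket; strip the leading slash so the
	// dialer sees "@name".
	if strings.HasPrefix(u.Path, "/@") {
		return strings.TrimPrefix(u.Path, "/"), nil
	}
	return u.Path, nil
}

func main() {
	for _, ep := range []string{
		"unix:///var/run/kms-plugin.sock", // hypothetical socket file
		"unix:///@kms-plugin",             // hypothetical abstract socket
	} {
		addr, err := parse(ep)
		fmt.Println(addr, err)
	}
}
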
+func (g *gRPCService) Encrypt(plain []byte) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) + defer cancel() + + request := &kmsapi.EncryptRequest{Plain: plain, Version: kmsapiVersion} + response, err := g.kmsClient.Encrypt(ctx, request) + if err != nil { + return nil, err + } + return response.Cipher, nil +} + +func (g *gRPCService) interceptor( + ctx context.Context, + method string, + req interface{}, + reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, +) error { + if !kmsapi.IsVersionCheckMethod(method) { + if err := g.checkAPIVersion(ctx); err != nil { + return err + } + } + + return invoker(ctx, method, req, reply, cc, opts...) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go new file mode 100644 index 0000000000..285ae14be4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envelope + +import ( + "sync" + "time" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "envelope_encryption" + fromStorageLabel = "from_storage" + toStorageLabel = "to_storage" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. 
+ */ +var ( + lockLastFromStorage sync.Mutex + lockLastToStorage sync.Mutex + + lastFromStorage time.Time + lastToStorage time.Time + + dekCacheFillPercent = metrics.NewGauge( + &metrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dek_cache_fill_percent", + Help: "Percent of the cache slots currently occupied by cached DEKs.", + StabilityLevel: metrics.ALPHA, + }, + ) + + dekCacheInterArrivals = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dek_cache_inter_arrival_time_seconds", + Help: "Time (in seconds) of inter arrival of transformation requests.", + StabilityLevel: metrics.ALPHA, + Buckets: metrics.ExponentialBuckets(60, 2, 10), + }, + []string{"transformation_type"}, + ) +) + +var registerMetricsFunc sync.Once + +func registerMetrics() { + registerMetricsFunc.Do(func() { + legacyregistry.MustRegister(dekCacheFillPercent) + legacyregistry.MustRegister(dekCacheInterArrivals) + }) +} + +func recordArrival(transformationType string, start time.Time) { + switch transformationType { + case fromStorageLabel: + lockLastFromStorage.Lock() + defer lockLastFromStorage.Unlock() + + if lastFromStorage.IsZero() { + lastFromStorage = start + } + dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastFromStorage).Seconds()) + lastFromStorage = start + case toStorageLabel: + lockLastToStorage.Lock() + defer lockLastToStorage.Unlock() + + if lastToStorage.IsZero() { + lastToStorage = start + } + dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastToStorage).Seconds()) + lastToStorage = start + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go new file mode 100644 index 0000000000..0d71bb2ba7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go @@ -0,0 +1,502 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: service.proto + +package v1beta1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type VersionRequest struct { + // Version of the KMS plugin API. 
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionRequest) Reset() { *m = VersionRequest{} } +func (m *VersionRequest) String() string { return proto.CompactTextString(m) } +func (*VersionRequest) ProtoMessage() {} +func (*VersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{0} +} +func (m *VersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionRequest.Unmarshal(m, b) +} +func (m *VersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionRequest.Marshal(b, m, deterministic) +} +func (m *VersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionRequest.Merge(m, src) +} +func (m *VersionRequest) XXX_Size() int { + return xxx_messageInfo_VersionRequest.Size(m) +} +func (m *VersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionRequest proto.InternalMessageInfo + +func (m *VersionRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type VersionResponse struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the KMS provider. + RuntimeName string `protobuf:"bytes,2,opt,name=runtime_name,json=runtimeName,proto3" json:"runtime_name,omitempty"` + // Version of the KMS provider. The string must be semver-compatible. + RuntimeVersion string `protobuf:"bytes,3,opt,name=runtime_version,json=runtimeVersion,proto3" json:"runtime_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionResponse) Reset() { *m = VersionResponse{} } +func (m *VersionResponse) String() string { return proto.CompactTextString(m) } +func (*VersionResponse) ProtoMessage() {} +func (*VersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{1} +} +func (m *VersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionResponse.Unmarshal(m, b) +} +func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) +} +func (m *VersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionResponse.Merge(m, src) +} +func (m *VersionResponse) XXX_Size() int { + return xxx_messageInfo_VersionResponse.Size(m) +} +func (m *VersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionResponse proto.InternalMessageInfo + +func (m *VersionResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *VersionResponse) GetRuntimeName() string { + if m != nil { + return m.RuntimeName + } + return "" +} + +func (m *VersionResponse) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +type DecryptRequest struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The data to be decrypted. 
+ Cipher []byte `protobuf:"bytes,2,opt,name=cipher,proto3" json:"cipher,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptRequest) Reset() { *m = DecryptRequest{} } +func (m *DecryptRequest) String() string { return proto.CompactTextString(m) } +func (*DecryptRequest) ProtoMessage() {} +func (*DecryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{2} +} +func (m *DecryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptRequest.Unmarshal(m, b) +} +func (m *DecryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptRequest.Marshal(b, m, deterministic) +} +func (m *DecryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptRequest.Merge(m, src) +} +func (m *DecryptRequest) XXX_Size() int { + return xxx_messageInfo_DecryptRequest.Size(m) +} +func (m *DecryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptRequest proto.InternalMessageInfo + +func (m *DecryptRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *DecryptRequest) GetCipher() []byte { + if m != nil { + return m.Cipher + } + return nil +} + +type DecryptResponse struct { + // The decrypted data. + Plain []byte `protobuf:"bytes,1,opt,name=plain,proto3" json:"plain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptResponse) Reset() { *m = DecryptResponse{} } +func (m *DecryptResponse) String() string { return proto.CompactTextString(m) } +func (*DecryptResponse) ProtoMessage() {} +func (*DecryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{3} +} +func (m *DecryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptResponse.Unmarshal(m, b) +} +func (m *DecryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptResponse.Marshal(b, m, deterministic) +} +func (m *DecryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptResponse.Merge(m, src) +} +func (m *DecryptResponse) XXX_Size() int { + return xxx_messageInfo_DecryptResponse.Size(m) +} +func (m *DecryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptResponse proto.InternalMessageInfo + +func (m *DecryptResponse) GetPlain() []byte { + if m != nil { + return m.Plain + } + return nil +} + +type EncryptRequest struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The data to be encrypted. 
+ Plain []byte `protobuf:"bytes,2,opt,name=plain,proto3" json:"plain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptRequest) Reset() { *m = EncryptRequest{} } +func (m *EncryptRequest) String() string { return proto.CompactTextString(m) } +func (*EncryptRequest) ProtoMessage() {} +func (*EncryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{4} +} +func (m *EncryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptRequest.Unmarshal(m, b) +} +func (m *EncryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptRequest.Marshal(b, m, deterministic) +} +func (m *EncryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptRequest.Merge(m, src) +} +func (m *EncryptRequest) XXX_Size() int { + return xxx_messageInfo_EncryptRequest.Size(m) +} +func (m *EncryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptRequest proto.InternalMessageInfo + +func (m *EncryptRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *EncryptRequest) GetPlain() []byte { + if m != nil { + return m.Plain + } + return nil +} + +type EncryptResponse struct { + // The encrypted data. + Cipher []byte `protobuf:"bytes,1,opt,name=cipher,proto3" json:"cipher,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptResponse) Reset() { *m = EncryptResponse{} } +func (m *EncryptResponse) String() string { return proto.CompactTextString(m) } +func (*EncryptResponse) ProtoMessage() {} +func (*EncryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{5} +} +func (m *EncryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptResponse.Unmarshal(m, b) +} +func (m *EncryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptResponse.Marshal(b, m, deterministic) +} +func (m *EncryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptResponse.Merge(m, src) +} +func (m *EncryptResponse) XXX_Size() int { + return xxx_messageInfo_EncryptResponse.Size(m) +} +func (m *EncryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptResponse proto.InternalMessageInfo + +func (m *EncryptResponse) GetCipher() []byte { + if m != nil { + return m.Cipher + } + return nil +} + +func init() { + proto.RegisterType((*VersionRequest)(nil), "v1beta1.VersionRequest") + proto.RegisterType((*VersionResponse)(nil), "v1beta1.VersionResponse") + proto.RegisterType((*DecryptRequest)(nil), "v1beta1.DecryptRequest") + proto.RegisterType((*DecryptResponse)(nil), "v1beta1.DecryptResponse") + proto.RegisterType((*EncryptRequest)(nil), "v1beta1.EncryptRequest") + proto.RegisterType((*EncryptResponse)(nil), "v1beta1.EncryptResponse") +} + +func init() { proto.RegisterFile("service.proto", fileDescriptor_a0b84a42fa06f626) } + +var fileDescriptor_a0b84a42fa06f626 = []byte{ + // 287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xcd, 0x4a, 0xc4, 0x30, + 0x10, 0xde, 0xae, 0xb8, 0xc5, 0xb1, 0xb6, 0x10, 0x16, 0x2d, 0x9e, 0x34, 0x97, 0x55, 0x0f, 0x85, + 0xd5, 0xbb, 0x88, 0xe8, 0x49, 0xf4, 0x50, 0xc1, 0xab, 0x64, 0xcb, 
0xa0, 0x05, 0x9b, 0xc6, 0x24, + 0x5b, 0xd9, 0x17, 0xf5, 0x79, 0xc4, 0x66, 0x5a, 0xd3, 0x15, 0x71, 0x8f, 0x33, 0x99, 0xef, 0x6f, + 0x26, 0xb0, 0x67, 0x50, 0x37, 0x65, 0x81, 0x99, 0xd2, 0xb5, 0xad, 0x59, 0xd8, 0xcc, 0x17, 0x68, + 0xc5, 0x9c, 0x9f, 0x41, 0xfc, 0x84, 0xda, 0x94, 0xb5, 0xcc, 0xf1, 0x7d, 0x89, 0xc6, 0xb2, 0x14, + 0xc2, 0xc6, 0x75, 0xd2, 0xe0, 0x28, 0x38, 0xd9, 0xc9, 0xbb, 0x92, 0x7f, 0x40, 0xd2, 0xcf, 0x1a, + 0x55, 0x4b, 0x83, 0x7f, 0x0f, 0xb3, 0x63, 0x88, 0xf4, 0x52, 0xda, 0xb2, 0xc2, 0x67, 0x29, 0x2a, + 0x4c, 0xc7, 0xed, 0xf3, 0x2e, 0xf5, 0x1e, 0x44, 0x85, 0x6c, 0x06, 0x49, 0x37, 0xd2, 0x91, 0x6c, + 0xb5, 0x53, 0x31, 0xb5, 0x49, 0x8d, 0x5f, 0x43, 0x7c, 0x83, 0x85, 0x5e, 0x29, 0xfb, 0xaf, 0x49, + 0xb6, 0x0f, 0x93, 0xa2, 0x54, 0xaf, 0xa8, 0x5b, 0xc5, 0x28, 0xa7, 0x8a, 0xcf, 0x20, 0xe9, 0x39, + 0xc8, 0xfc, 0x14, 0xb6, 0xd5, 0x9b, 0x28, 0x1d, 0x45, 0x94, 0xbb, 0x82, 0x5f, 0x41, 0x7c, 0x2b, + 0x37, 0x14, 0xeb, 0x19, 0xc6, 0x3e, 0xc3, 0x29, 0x24, 0x3d, 0x03, 0x49, 0xfd, 0xb8, 0x0a, 0x7c, + 0x57, 0xe7, 0x9f, 0x01, 0x4c, 0xef, 0x70, 0x75, 0x2f, 0xa4, 0x78, 0xc1, 0x0a, 0xa5, 0x7d, 0x74, + 0x67, 0x62, 0x97, 0x10, 0x52, 0x7a, 0x76, 0x90, 0xd1, 0xb1, 0xb2, 0xe1, 0xa5, 0x0e, 0xd3, 0xdf, + 0x0f, 0x4e, 0x8e, 0x8f, 0xbe, 0xf1, 0x14, 0xd7, 0xc3, 0x0f, 0x97, 0xe8, 0xe1, 0xd7, 0x36, 0xe3, + 0xf0, 0x94, 0xc1, 0xc3, 0x0f, 0xf7, 0xe2, 0xe1, 0xd7, 0xe2, 0xf2, 0xd1, 0x62, 0xd2, 0xfe, 0xb3, + 0x8b, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x33, 0x8d, 0x09, 0xe1, 0x78, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// KeyManagementServiceClient is the client API for KeyManagementService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyManagementServiceClient interface { + // Version returns the runtime name and runtime version of the KMS provider. + Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) + // Execute decryption operation in KMS provider. + Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) + // Execute encryption operation in KMS provider. + Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) +} + +type keyManagementServiceClient struct { + cc *grpc.ClientConn +} + +func NewKeyManagementServiceClient(cc *grpc.ClientConn) KeyManagementServiceClient { + return &keyManagementServiceClient{cc} +} + +func (c *keyManagementServiceClient) Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) { + out := new(DecryptResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Decrypt", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { + out := new(EncryptResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Encrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyManagementServiceServer is the server API for KeyManagementService service. +type KeyManagementServiceServer interface { + // Version returns the runtime name and runtime version of the KMS provider. + Version(context.Context, *VersionRequest) (*VersionResponse, error) + // Execute decryption operation in KMS provider. + Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) + // Execute encryption operation in KMS provider. + Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) +} + +// UnimplementedKeyManagementServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyManagementServiceServer struct { +} + +func (*UnimplementedKeyManagementServiceServer) Version(ctx context.Context, req *VersionRequest) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Decrypt(ctx context.Context, req *DecryptRequest) (*DecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Decrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Encrypt(ctx context.Context, req *EncryptRequest) (*EncryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Encrypt not implemented") +} + +func RegisterKeyManagementServiceServer(s *grpc.Server, srv KeyManagementServiceServer) { + s.RegisterService(&_KeyManagementService_serviceDesc, srv) +} + +func _KeyManagementService_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Version(ctx, req.(*VersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Decrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Decrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Decrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Decrypt(ctx, req.(*DecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Encrypt(ctx, in) + } + info 
:= &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Encrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Encrypt(ctx, req.(*EncryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1beta1.KeyManagementService", + HandlerType: (*KeyManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _KeyManagementService_Version_Handler, + }, + { + MethodName: "Decrypt", + Handler: _KeyManagementService_Decrypt_Handler, + }, + { + MethodName: "Encrypt", + Handler: _KeyManagementService_Encrypt_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "service.proto", +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto new file mode 100644 index 0000000000..b6c2f31c7e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto @@ -0,0 +1,70 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// To regenerate service.pb.go run hack/update-generated-kms.sh +syntax = "proto3"; + +package v1beta1; + +// This service defines the public APIs for remote KMS provider. +service KeyManagementService { + // Version returns the runtime name and runtime version of the KMS provider. + rpc Version(VersionRequest) returns (VersionResponse) {} + + // Execute decryption operation in KMS provider. + rpc Decrypt(DecryptRequest) returns (DecryptResponse) {} + // Execute encryption operation in KMS provider. + rpc Encrypt(EncryptRequest) returns (EncryptResponse) {} +} + +message VersionRequest { + // Version of the KMS plugin API. + string version = 1; +} + +message VersionResponse { + // Version of the KMS plugin API. + string version = 1; + // Name of the KMS provider. + string runtime_name = 2; + // Version of the KMS provider. The string must be semver-compatible. + string runtime_version = 3; +} + +message DecryptRequest { + // Version of the KMS plugin API. + string version = 1; + // The data to be decrypted. + bytes cipher = 2; +} + +message DecryptResponse { + // The decrypted data. + bytes plain = 1; +} + +message EncryptRequest { + // Version of the KMS plugin API. + string version = 1; + // The data to be encrypted. + bytes plain = 2; +} + +message EncryptResponse { + // The encrypted data. + bytes cipher = 1; +} + diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go new file mode 100644 index 0000000000..842d0a2fdc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains definition of kms-plugin's gRPC service. +package v1beta1 + +// IsVersionCheckMethod determines whether the supplied method is a version check against kms-plugin. +func IsVersionCheckMethod(method string) bool { + return method == "/v1beta1.KeyManagementService/Version" +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go new file mode 100644 index 0000000000..e322bd9b17 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package identity + +import ( + "bytes" + "fmt" + + "k8s.io/apiserver/pkg/storage/value" +) + +// identityTransformer performs no transformation on provided data, but validates +// that the data is not encrypted data during TransformFromStorage +type identityTransformer struct{} + +// NewEncryptCheckTransformer returns an identityTransformer which returns an error +// on attempts to read encrypted data +func NewEncryptCheckTransformer() value.Transformer { + return identityTransformer{} +} + +// TransformFromStorage returns the input bytes if the data is not encrypted +func (identityTransformer) TransformFromStorage(b []byte, context value.Context) ([]byte, bool, error) { + // identityTransformer has to return an error if the data is encoded using another transformer. + // JSON data starts with '{'. Protobuf data has a prefix 'k8s[\x00-\xFF]'. + // Prefix 'k8s:enc:' is reserved for encrypted data on disk. + if bytes.HasPrefix(b, []byte("k8s:enc:")) { + return []byte{}, false, fmt.Errorf("identity transformer tried to read encrypted data") + } + return b, false, nil +} + +// TransformToStorage implements the Transformer interface for identityTransformer +func (identityTransformer) TransformToStorage(b []byte, context value.Context) ([]byte, error) { + return b, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go new file mode 100644 index 0000000000..0eaa628245 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package secretbox transforms values for storage at rest using XSalsa20 and Poly1305. +package secretbox + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/nacl/secretbox" + + "k8s.io/apiserver/pkg/storage/value" +) + +// secretbox implements at rest encryption of the provided values given a 32 byte secret key. +// Uses a standard 24 byte nonce (placed at the beginning of the cipher text) generated +// from crypto/rand. Does not perform authentication of the data at rest. +type secretboxTransformer struct { + key [32]byte +} + +const nonceSize = 24 + +// NewSecretboxTransformer takes the given key and performs encryption and decryption on the given +// data. +func NewSecretboxTransformer(key [32]byte) value.Transformer { + return &secretboxTransformer{key: key} +} + +func (t *secretboxTransformer) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + if len(data) < (secretbox.Overhead + nonceSize) { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + var nonce [nonceSize]byte + copy(nonce[:], data[:nonceSize]) + data = data[nonceSize:] + out := make([]byte, 0, len(data)-secretbox.Overhead) + result, ok := secretbox.Open(out, data, &nonce, &t.key) + if !ok { + return nil, false, fmt.Errorf("output array was not large enough for encryption") + } + return result, false, nil +} + +func (t *secretboxTransformer) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + var nonce [nonceSize]byte + n, err := rand.Read(nonce[:]) + if err != nil { + return nil, err + } + if n != nonceSize { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + return secretbox.Seal(nonce[:], data, &nonce, &t.key), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go new file mode 100644 index 0000000000..292cfcd90d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "sync" + "time" + + "google.golang.org/grpc/status" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "storage" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + transformerLatencies = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "transformation_duration_seconds", + Help: "Latencies in seconds of value transformation operations.", + // In-process transformations (ex. AES CBC) complete on the order of 20 microseconds. However, when + // external KMS is involved latencies may climb into milliseconds. + Buckets: metrics.ExponentialBuckets(5e-6, 2, 14), + StabilityLevel: metrics.ALPHA, + }, + []string{"transformation_type"}, + ) + + transformerOperationsTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "transformation_operations_total", + Help: "Total number of transformations.", + StabilityLevel: metrics.ALPHA, + }, + []string{"transformation_type", "transformer_prefix", "status"}, + ) + + envelopeTransformationCacheMissTotal = metrics.NewCounter( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "envelope_transformation_cache_misses_total", + Help: "Total number of cache misses while accessing key decryption key(KEK).", + StabilityLevel: metrics.ALPHA, + }, + ) + + dataKeyGenerationLatencies = metrics.NewHistogram( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "data_key_generation_duration_seconds", + Help: "Latencies in seconds of data encryption key(DEK) generation operations.", + Buckets: metrics.ExponentialBuckets(5e-6, 2, 14), + StabilityLevel: metrics.ALPHA, + }, + ) + + dataKeyGenerationFailuresTotal = metrics.NewCounter( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "data_key_generation_failures_total", + Help: "Total number of failed data encryption key(DEK) generation operations.", + StabilityLevel: metrics.ALPHA, + }, + ) +) + +var registerMetrics sync.Once + +func RegisterMetrics() { + registerMetrics.Do(func() { + legacyregistry.MustRegister(transformerLatencies) + legacyregistry.MustRegister(transformerOperationsTotal) + legacyregistry.MustRegister(envelopeTransformationCacheMissTotal) + legacyregistry.MustRegister(dataKeyGenerationLatencies) + legacyregistry.MustRegister(dataKeyGenerationFailuresTotal) + }) +} + +// RecordTransformation records latencies and count of TransformFromStorage and TransformToStorage operations. +// Note that transformation_failures_total metric is deprecated, use transformation_operations_total instead. 
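
For reference (not part of the vendored files): the transformation-latency histogram above is configured with ExponentialBuckets(5e-6, 2, 14), i.e. 14 bucket boundaries starting at 5µs and doubling each step, up to roughly 41ms. A small sketch that prints those boundaries:

package main

import "fmt"

func main() {
	buckets := make([]float64, 14)
	b := 5e-6
	for i := range buckets {
		buckets[i] = b
		b *= 2
	}
	fmt.Println(buckets) // 5e-06, 1e-05, 2e-05, ... , 0.04096
}
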
+func RecordTransformation(transformationType, transformerPrefix string, start time.Time, err error) { + transformerOperationsTotal.WithLabelValues(transformationType, transformerPrefix, status.Code(err).String()).Inc() + + switch { + case err == nil: + transformerLatencies.WithLabelValues(transformationType).Observe(sinceInSeconds(start)) + } +} + +// RecordCacheMiss records a miss on Key Encryption Key(KEK) - call to KMS was required to decrypt KEK. +func RecordCacheMiss() { + envelopeTransformationCacheMissTotal.Inc() +} + +// RecordDataKeyGeneration records latencies and count of Data Encryption Key generation operations. +func RecordDataKeyGeneration(start time.Time, err error) { + if err != nil { + dataKeyGenerationFailuresTotal.Inc() + return + } + + dataKeyGenerationLatencies.Observe(sinceInSeconds(start)) +} + +// sinceInSeconds gets the time since the specified start in seconds. +func sinceInSeconds(start time.Time) float64 { + return time.Since(start).Seconds() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go b/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go new file mode 100644 index 0000000000..3bc41cd9a2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go @@ -0,0 +1,209 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package value contains methods for assisting with transformation of values in storage. +package value + +import ( + "bytes" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/errors" +) + +func init() { + RegisterMetrics() +} + +// Context is additional information that a storage transformation may need to verify the data at rest. +type Context interface { + // AuthenticatedData should return an array of bytes that describes the current value. If the value changes, + // the transformer may report the value as unreadable or tampered. This may be nil if no such description exists + // or is needed. For additional verification, set this to data that strongly identifies the value, such as + // the key and creation version of the stored data. + AuthenticatedData() []byte +} + +// Transformer allows a value to be transformed before being read from or written to the underlying store. The methods +// must be able to undo the transformation caused by the other. +type Transformer interface { + // TransformFromStorage may transform the provided data from its underlying storage representation or return an error. + // Stale is true if the object on disk is stale and a write to etcd should be issued, even if the contents of the object + // have not changed. + TransformFromStorage(data []byte, context Context) (out []byte, stale bool, err error) + // TransformToStorage may transform the provided data into the appropriate form in storage or return an error. + TransformToStorage(data []byte, context Context) (out []byte, err error) +} + +type identityTransformer struct{} + +// IdentityTransformer performs no transformation of the provided data. 
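
An illustrative sketch (not part of the vendored files) of the Transformer contract defined above: a toy base64 transformer with no security properties, shown only to demonstrate the round-trip expectation that TransformToStorage followed by TransformFromStorage returns the original bytes. The local Context and Transformer declarations merely mirror the vendored interfaces so the sketch compiles on its own.

package main

import (
	"encoding/base64"
	"fmt"
)

// Context and Transformer repeat the vendored value package interfaces
// for self-containment only.
type Context interface{ AuthenticatedData() []byte }

type Transformer interface {
	TransformFromStorage(data []byte, ctx Context) ([]byte, bool, error)
	TransformToStorage(data []byte, ctx Context) ([]byte, error)
}

type base64Transformer struct{}

func (base64Transformer) TransformToStorage(data []byte, _ Context) ([]byte, error) {
	return []byte(base64.StdEncoding.EncodeToString(data)), nil
}

func (base64Transformer) TransformFromStorage(data []byte, _ Context) ([]byte, bool, error) {
	out, err := base64.StdEncoding.DecodeString(string(data))
	return out, false, err
}

func main() {
	var t Transformer = base64Transformer{}
	stored, _ := t.TransformToStorage([]byte("hello"), nil)
	back, _, _ := t.TransformFromStorage(stored, nil)
	fmt.Println(string(stored), string(back)) // aGVsbG8= hello
}
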
+var IdentityTransformer Transformer = identityTransformer{} + +func (identityTransformer) TransformFromStorage(b []byte, ctx Context) ([]byte, bool, error) { + return b, false, nil +} +func (identityTransformer) TransformToStorage(b []byte, ctx Context) ([]byte, error) { + return b, nil +} + +// DefaultContext is a simple implementation of Context for a slice of bytes. +type DefaultContext []byte + +// AuthenticatedData returns itself. +func (c DefaultContext) AuthenticatedData() []byte { return []byte(c) } + +// MutableTransformer allows a transformer to be changed safely at runtime. +type MutableTransformer struct { + lock sync.RWMutex + transformer Transformer +} + +// NewMutableTransformer creates a transformer that can be updated at any time by calling Set() +func NewMutableTransformer(transformer Transformer) *MutableTransformer { + return &MutableTransformer{transformer: transformer} +} + +// Set updates the nested transformer. +func (t *MutableTransformer) Set(transformer Transformer) { + t.lock.Lock() + t.transformer = transformer + t.lock.Unlock() +} + +func (t *MutableTransformer) TransformFromStorage(data []byte, context Context) (out []byte, stale bool, err error) { + t.lock.RLock() + transformer := t.transformer + t.lock.RUnlock() + return transformer.TransformFromStorage(data, context) +} +func (t *MutableTransformer) TransformToStorage(data []byte, context Context) (out []byte, err error) { + t.lock.RLock() + transformer := t.transformer + t.lock.RUnlock() + return transformer.TransformToStorage(data, context) +} + +// PrefixTransformer holds a transformer interface and the prefix that the transformation is located under. +type PrefixTransformer struct { + Prefix []byte + Transformer Transformer +} + +type prefixTransformers struct { + transformers []PrefixTransformer + err error +} + +var _ Transformer = &prefixTransformers{} + +// NewPrefixTransformers supports the Transformer interface by checking the incoming data against the provided +// prefixes in order. The first matching prefix will be used to transform the value (the prefix is stripped +// before the Transformer interface is invoked). The first provided transformer will be used when writing to +// the store. +func NewPrefixTransformers(err error, transformers ...PrefixTransformer) Transformer { + if err == nil { + err = fmt.Errorf("the provided value does not match any of the supported transformers") + } + return &prefixTransformers{ + transformers: transformers, + err: err, + } +} + +// TransformFromStorage finds the first transformer with a prefix matching the provided data and returns +// the result of transforming the value. It will always mark any transformation as stale that is not using +// the first transformer. +func (t *prefixTransformers) TransformFromStorage(data []byte, context Context) ([]byte, bool, error) { + start := time.Now() + var errs []error + for i, transformer := range t.transformers { + if bytes.HasPrefix(data, transformer.Prefix) { + result, stale, err := transformer.Transformer.TransformFromStorage(data[len(transformer.Prefix):], context) + // To migrate away from encryption, user can specify an identity transformer higher up + // (in the config file) than the encryption transformer. In that scenario, the identity transformer needs to + // identify (during reads from disk) whether the data being read is encrypted or not. If the data is encrypted, + // it shall throw an error, but that error should not prevent the next subsequent transformer from being tried. 
+ if len(transformer.Prefix) == 0 && err != nil { + continue + } + if len(transformer.Prefix) == 0 { + RecordTransformation("from_storage", "identity", start, err) + } else { + RecordTransformation("from_storage", string(transformer.Prefix), start, err) + } + + // It is valid to have overlapping prefixes when the same encryption provider + // is specified multiple times but with different keys (the first provider is + // being rotated to and some later provider is being rotated away from). + // + // Example: + // + // { + // "aescbc": { + // "keys": [ + // { + // "name": "2", + // "secret": "some key 2" + // } + // ] + // } + // }, + // { + // "aescbc": { + // "keys": [ + // { + // "name": "1", + // "secret": "some key 1" + // } + // ] + // } + // }, + // + // The transformers for both aescbc configs share the prefix k8s:enc:aescbc:v1: + // but a failure in the first one should not prevent a later match from being attempted. + // Thus we never short-circuit on a prefix match that results in an error. + if err != nil { + errs = append(errs, err) + continue + } + + return result, stale || i != 0, err + } + } + if err := errors.Reduce(errors.NewAggregate(errs)); err != nil { + return nil, false, err + } + RecordTransformation("from_storage", "unknown", start, t.err) + return nil, false, t.err +} + +// TransformToStorage uses the first transformer and adds its prefix to the data. +func (t *prefixTransformers) TransformToStorage(data []byte, context Context) ([]byte, error) { + start := time.Now() + transformer := t.transformers[0] + prefixedData := make([]byte, len(transformer.Prefix), len(data)+len(transformer.Prefix)) + copy(prefixedData, transformer.Prefix) + result, err := transformer.Transformer.TransformToStorage(data, context) + RecordTransformation("to_storage", string(transformer.Prefix), start, err) + if err != nil { + return nil, err + } + prefixedData = append(prefixedData, result...) + return prefixedData, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storageversion/OWNERS b/vendor/k8s.io/apiserver/pkg/storageversion/OWNERS new file mode 100644 index 0000000000..ca9aa13584 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storageversion/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- caesarxuchao +- roycaihw diff --git a/vendor/k8s.io/apiserver/pkg/storageversion/manager.go b/vendor/k8s.io/apiserver/pkg/storageversion/manager.go new file mode 100644 index 0000000000..03e21a4d6d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storageversion/manager.go @@ -0,0 +1,277 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storageversion + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + _ "k8s.io/component-base/metrics/prometheus/workqueue" // for workqueue metric registration + "k8s.io/klog/v2" +) + +// ResourceInfo contains the information to register the resource to the +// storage version API. +type ResourceInfo struct { + GroupResource schema.GroupResource + + EncodingVersion string + // Used to calculate decodable versions. Can only be used after all + // equivalent versions are registered by InstallREST. + EquivalentResourceMapper runtime.EquivalentResourceRegistry +} + +// Manager records the resources whose StorageVersions need updates, and provides a method to update those StorageVersions. +type Manager interface { + // AddResourceInfo records resources whose StorageVersions need updates + AddResourceInfo(resources ...*ResourceInfo) + // UpdateStorageVersions tries to update the StorageVersions of the recorded resources + UpdateStorageVersions(kubeAPIServerClientConfig *rest.Config, apiserverID string) + // PendingUpdate returns true if the StorageVersion of the given resource is still pending update. + PendingUpdate(gr schema.GroupResource) bool + // LastUpdateError returns the last error hit when updating the storage version of the given resource. + LastUpdateError(gr schema.GroupResource) error + // Completed returns true if updating StorageVersions of all recorded resources has completed. + Completed() bool +} + +var _ Manager = &defaultManager{} + +// defaultManager indicates if an apiserver has completed reporting its storage versions. +type defaultManager struct { + completed atomic.Value + + mu sync.RWMutex + // managedResourceInfos records the ResourceInfos whose StorageVersions will get updated in the next + // UpdateStorageVersions call + managedResourceInfos map[*ResourceInfo]struct{} + // managedStatus records the update status of StorageVersion for each GroupResource. Since one + // ResourceInfo may expand into multiple GroupResource (e.g. ingresses.networking.k8s.io and ingresses.extensions), + // this map allows quick status lookup for a GroupResource, during API request handling. + managedStatus map[schema.GroupResource]*updateStatus +} + +type updateStatus struct { + done bool + lastErr error +} + +// NewDefaultManager creates a new defaultManager. +func NewDefaultManager() Manager { + s := &defaultManager{} + s.completed.Store(false) + s.managedResourceInfos = make(map[*ResourceInfo]struct{}) + s.managedStatus = make(map[schema.GroupResource]*updateStatus) + return s +} + +// AddResourceInfo adds ResourceInfo to the manager. 
+func (s *defaultManager) AddResourceInfo(resources ...*ResourceInfo) { + s.mu.Lock() + defer s.mu.Unlock() + for _, r := range resources { + s.managedResourceInfos[r] = struct{}{} + s.addPendingManagedStatusLocked(r) + } +} + +func (s *defaultManager) addPendingManagedStatusLocked(r *ResourceInfo) { + gvrs := r.EquivalentResourceMapper.EquivalentResourcesFor(r.GroupResource.WithVersion(""), "") + for _, gvr := range gvrs { + gr := gvr.GroupResource() + if _, ok := s.managedStatus[gr]; !ok { + s.managedStatus[gr] = &updateStatus{} + } + } +} + +// UpdateStorageVersions tries to update the StorageVersions of the recorded resources +func (s *defaultManager) UpdateStorageVersions(kubeAPIServerClientConfig *rest.Config, serverID string) { + clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get clientset: %v", err)) + return + } + sc := clientset.InternalV1alpha1().StorageVersions() + + s.mu.RLock() + resources := []ResourceInfo{} + for resource := range s.managedResourceInfos { + resources = append(resources, *resource) + } + s.mu.RUnlock() + hasFailure := false + // Sorting the list to make sure we have a consistent dedup result, and + // therefore avoid creating unnecessarily duplicated StorageVersion objects. + // For example, extensions.ingresses and networking.k8s.io.ingresses share + // the same underlying storage. Without sorting, in an HA cluster, one + // apiserver may dedup and update StorageVersion for extensions.ingresses, + // while another apiserver may dedup and update StorageVersion for + // networking.k8s.io.ingresses. The storage migrator (which migrates objects + // per GroupResource) will migrate these resources twice, since both + // StorageVersion objects have CommonEncodingVersion (each with one server registered). + sortResourceInfosByGroupResource(resources) + for _, r := range dedupResourceInfos(resources) { + dv := decodableVersions(r.EquivalentResourceMapper, r.GroupResource) + gr := r.GroupResource + // Group must be a valid subdomain in DNS (RFC 1123) + if len(gr.Group) == 0 { + gr.Group = "core" + } + if err := updateStorageVersionFor(sc, serverID, gr, r.EncodingVersion, dv); err != nil { + utilruntime.HandleError(fmt.Errorf("failed to update storage version for %v: %v", r.GroupResource, err)) + s.recordStatusFailure(&r, err) + hasFailure = true + continue + } + klog.V(2).Infof("successfully updated storage version for %v", r.GroupResource) + s.recordStatusSuccess(&r) + } + if hasFailure { + return + } + klog.V(2).Infof("storage version updates complete") + s.setComplete() +} + +// dedupResourceInfos dedups ResourceInfos with the same underlying storage. +// ResourceInfos from the same Group with different Versions share the same underlying storage. +// ResourceInfos from different Groups may share the same underlying storage, e.g. +// networking.k8s.io ingresses and extensions ingresses. The StorageVersion manager +// only needs to update one StorageVersion for the equivalent Groups. 
+func dedupResourceInfos(infos []ResourceInfo) []ResourceInfo { + var ret []ResourceInfo + seen := make(map[schema.GroupResource]struct{}) + for _, info := range infos { + gr := info.GroupResource + if _, ok := seen[gr]; ok { + continue + } + gvrs := info.EquivalentResourceMapper.EquivalentResourcesFor(gr.WithVersion(""), "") + for _, gvr := range gvrs { + seen[gvr.GroupResource()] = struct{}{} + } + ret = append(ret, info) + } + return ret +} + +func sortResourceInfosByGroupResource(infos []ResourceInfo) { + sort.Sort(byGroupResource(infos)) +} + +type byGroupResource []ResourceInfo + +func (s byGroupResource) Len() int { return len(s) } + +func (s byGroupResource) Less(i, j int) bool { + if s[i].GroupResource.Group == s[j].GroupResource.Group { + return s[i].GroupResource.Resource < s[j].GroupResource.Resource + } + return s[i].GroupResource.Group < s[j].GroupResource.Group +} + +func (s byGroupResource) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// recordStatusSuccess marks updated ResourceInfo as completed. +func (s *defaultManager) recordStatusSuccess(r *ResourceInfo) { + s.mu.Lock() + defer s.mu.Unlock() + s.recordStatusSuccessLocked(r) +} + +func (s *defaultManager) recordStatusSuccessLocked(r *ResourceInfo) { + gvrs := r.EquivalentResourceMapper.EquivalentResourcesFor(r.GroupResource.WithVersion(""), "") + for _, gvr := range gvrs { + s.recordSuccessGroupResourceLocked(gvr.GroupResource()) + } +} + +func (s *defaultManager) recordSuccessGroupResourceLocked(gr schema.GroupResource) { + if _, ok := s.managedStatus[gr]; !ok { + return + } + s.managedStatus[gr].done = true + s.managedStatus[gr].lastErr = nil +} + +// recordStatusFailure records latest error updating ResourceInfo. +func (s *defaultManager) recordStatusFailure(r *ResourceInfo, err error) { + s.mu.Lock() + defer s.mu.Unlock() + s.recordStatusFailureLocked(r, err) +} + +func (s *defaultManager) recordStatusFailureLocked(r *ResourceInfo, err error) { + gvrs := r.EquivalentResourceMapper.EquivalentResourcesFor(r.GroupResource.WithVersion(""), "") + for _, gvr := range gvrs { + s.recordErrorGroupResourceLocked(gvr.GroupResource(), err) + } +} + +func (s *defaultManager) recordErrorGroupResourceLocked(gr schema.GroupResource, err error) { + if _, ok := s.managedStatus[gr]; !ok { + return + } + s.managedStatus[gr].lastErr = err +} + +// PendingUpdate returns if the StorageVersion of a resource is still waiting to be updated. +func (s *defaultManager) PendingUpdate(gr schema.GroupResource) bool { + s.mu.RLock() + defer s.mu.RUnlock() + if _, ok := s.managedStatus[gr]; !ok { + return false + } + return !s.managedStatus[gr].done +} + +// LastUpdateError returns the last error hit when updating the storage version of the given resource. +func (s *defaultManager) LastUpdateError(gr schema.GroupResource) error { + s.mu.RLock() + defer s.mu.RUnlock() + if _, ok := s.managedStatus[gr]; !ok { + return fmt.Errorf("couldn't find managed status for %v", gr) + } + return s.managedStatus[gr].lastErr +} + +// setComplete marks the completion of updating StorageVersions. No write requests need to be blocked anymore. +func (s *defaultManager) setComplete() { + s.completed.Store(true) +} + +// Completed returns if updating StorageVersions has completed.
+func (s *defaultManager) Completed() bool { + return s.completed.Load().(bool) +} + +func decodableVersions(e runtime.EquivalentResourceRegistry, gr schema.GroupResource) []string { + var versions []string + decodingGVRs := e.EquivalentResourcesFor(gr.WithVersion(""), "") + for _, v := range decodingGVRs { + versions = append(versions, v.GroupVersion().String()) + } + return versions +} diff --git a/vendor/k8s.io/apiserver/pkg/storageversion/updater.go b/vendor/k8s.io/apiserver/pkg/storageversion/updater.go new file mode 100644 index 0000000000..10927fb0f0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storageversion/updater.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storageversion + +import ( + "context" + "fmt" + "time" + + "k8s.io/api/apiserverinternal/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" +) + +// Client has the methods required to update the storage version. +type Client interface { + Create(context.Context, *v1alpha1.StorageVersion, metav1.CreateOptions) (*v1alpha1.StorageVersion, error) + UpdateStatus(context.Context, *v1alpha1.StorageVersion, metav1.UpdateOptions) (*v1alpha1.StorageVersion, error) + Get(context.Context, string, metav1.GetOptions) (*v1alpha1.StorageVersion, error) +} + +func setCommonEncodingVersion(sv *v1alpha1.StorageVersion) { + if len(sv.Status.StorageVersions) == 0 { + return + } + firstVersion := sv.Status.StorageVersions[0].EncodingVersion + agreed := true + for _, ssv := range sv.Status.StorageVersions { + if ssv.EncodingVersion != firstVersion { + agreed = false + break + } + } + if agreed { + sv.Status.CommonEncodingVersion = &firstVersion + } else { + sv.Status.CommonEncodingVersion = nil + } +} + +// updateStorageVersionFor updates the storage version object for the resource. 
+func updateStorageVersionFor(c Client, apiserverID string, gr schema.GroupResource, encodingVersion string, decodableVersions []string) error { + retries := 3 + var retry int + var err error + for retry < retries { + err = singleUpdate(c, apiserverID, gr, encodingVersion, decodableVersions) + if err == nil { + return nil + } + if apierrors.IsAlreadyExists(err) || apierrors.IsConflict(err) { + time.Sleep(1 * time.Second) + continue + } + if err != nil { + klog.Errorf("retry %d, failed to update storage version for %v: %v", retry, gr, err) + retry++ + time.Sleep(1 * time.Second) + } + } + return err +} + +func singleUpdate(c Client, apiserverID string, gr schema.GroupResource, encodingVersion string, decodableVersions []string) error { + shouldCreate := false + name := fmt.Sprintf("%s.%s", gr.Group, gr.Resource) + sv, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + if apierrors.IsNotFound(err) { + shouldCreate = true + sv = &v1alpha1.StorageVersion{} + sv.ObjectMeta.Name = name + } + updatedSV := localUpdateStorageVersion(sv, apiserverID, encodingVersion, decodableVersions) + if shouldCreate { + createdSV, err := c.Create(context.TODO(), updatedSV, metav1.CreateOptions{}) + if err != nil { + return err + } + // assign the calculated status to the object just created, then update status + createdSV.Status = updatedSV.Status + _, err = c.UpdateStatus(context.TODO(), createdSV, metav1.UpdateOptions{}) + return err + } + _, err = c.UpdateStatus(context.TODO(), updatedSV, metav1.UpdateOptions{}) + return err +} + +// localUpdateStorageVersion updates the input storageversion with given server storageversion info. +// The function updates the input storageversion in place. +func localUpdateStorageVersion(sv *v1alpha1.StorageVersion, apiserverID, encodingVersion string, decodableVersions []string) *v1alpha1.StorageVersion { + newSSV := v1alpha1.ServerStorageVersion{ + APIServerID: apiserverID, + EncodingVersion: encodingVersion, + DecodableVersions: decodableVersions, + } + foundSSV := false + for i, ssv := range sv.Status.StorageVersions { + if ssv.APIServerID == apiserverID { + sv.Status.StorageVersions[i] = newSSV + foundSSV = true + break + } + } + if !foundSSV { + sv.Status.StorageVersions = append(sv.Status.StorageVersions, newSSV) + } + setCommonEncodingVersion(sv) + return sv +} diff --git a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go new file mode 100644 index 0000000000..905523c734 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apihelpers + +import ( + "sort" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" +) + +// SetFlowSchemaCondition sets conditions. 
+func SetFlowSchemaCondition(flowSchema *flowcontrol.FlowSchema, newCondition flowcontrol.FlowSchemaCondition) { + existingCondition := GetFlowSchemaConditionByType(flowSchema, newCondition.Type) + if existingCondition == nil { + flowSchema.Status.Conditions = append(flowSchema.Status.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// GetFlowSchemaConditionByType gets conditions. +func GetFlowSchemaConditionByType(flowSchema *flowcontrol.FlowSchema, conditionType flowcontrol.FlowSchemaConditionType) *flowcontrol.FlowSchemaCondition { + for i := range flowSchema.Status.Conditions { + if flowSchema.Status.Conditions[i].Type == conditionType { + return &flowSchema.Status.Conditions[i] + } + } + return nil +} + +// SetPriorityLevelConfigurationCondition sets conditions. +func SetPriorityLevelConfigurationCondition(priorityLevel *flowcontrol.PriorityLevelConfiguration, newCondition flowcontrol.PriorityLevelConfigurationCondition) { + existingCondition := GetPriorityLevelConfigurationConditionByType(priorityLevel, newCondition.Type) + if existingCondition == nil { + priorityLevel.Status.Conditions = append(priorityLevel.Status.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// GetPriorityLevelConfigurationConditionByType gets conditions. +func GetPriorityLevelConfigurationConditionByType(priorityLevel *flowcontrol.PriorityLevelConfiguration, conditionType flowcontrol.PriorityLevelConfigurationConditionType) *flowcontrol.PriorityLevelConfigurationCondition { + for i := range priorityLevel.Status.Conditions { + if priorityLevel.Status.Conditions[i].Type == conditionType { + return &priorityLevel.Status.Conditions[i] + } + } + return nil +} + +var _ sort.Interface = FlowSchemaSequence{} + +// FlowSchemaSequence holds sorted set of pointers to FlowSchema objects. +// FlowSchemaSequence implements `sort.Interface` +type FlowSchemaSequence []*flowcontrol.FlowSchema + +func (s FlowSchemaSequence) Len() int { + return len(s) +} + +func (s FlowSchemaSequence) Less(i, j int) bool { + // the flow-schema w/ lower matching-precedence is prior + if ip, jp := s[i].Spec.MatchingPrecedence, s[j].Spec.MatchingPrecedence; ip != jp { + return ip < jp + } + // sort alphabetically + return s[i].Name < s[j].Name +} + +func (s FlowSchemaSequence) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go b/vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go new file mode 100644 index 0000000000..3e28c29345 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dryrun + +// IsDryRun returns true if the DryRun flag is an actual dry-run. +func IsDryRun(flag []string) bool { + return len(flag) > 0 +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go new file mode 100644 index 0000000000..2f93df73d3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -0,0 +1,764 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/pkg/errors" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + apitypes "k8s.io/apimachinery/pkg/types" + apierrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + fcboot "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/util/apihelpers" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + fcfmt "k8s.io/apiserver/pkg/util/flowcontrol/format" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta1" +) + +// This file contains a simple local (to the apiserver) controller +// that digests API Priority and Fairness config objects (FlowSchema +// and PriorityLevelConfiguration) into the data structure that the +// filter uses. At this first level of development this controller +// takes the simplest possible approach: whenever notified of any +// change to any config object, or when any priority level that is +// undesired becomes completely unused, all the config objects are +// read and processed as a whole. + +// StartFunction begins the process of handling a request. If the +// request gets queued then this function uses the given hashValue as +// the source of entropy as it shuffle-shards the request into a +// queue. The descr1 and descr2 values play no role in the logic but +// appear in log messages. This method does not return until the +// queuing, if any, for this request is done.
If `execute` is false +// then `afterExecution` is irrelevant and the request should be +// rejected. Otherwise the request should be executed and +// `afterExecution` must be called exactly once. +type StartFunction func(ctx context.Context, hashValue uint64) (execute bool, afterExecution func()) + +// RequestDigest holds necessary info from request for flow-control +type RequestDigest struct { + RequestInfo *request.RequestInfo + User user.Info +} + +// `*configController` maintains eventual consistency with the API +// objects that configure API Priority and Fairness, and provides a +// procedural interface to the configured behavior. The methods of +// this type and cfgMeal follow the convention that the suffix +// "Locked" means that the caller must hold the configController lock. +type configController struct { + queueSetFactory fq.QueueSetFactory + obsPairGenerator metrics.TimedObserverPairGenerator + + // configQueue holds `(interface{})(0)` when the configuration + // objects need to be reprocessed. + configQueue workqueue.RateLimitingInterface + + plLister flowcontrollister.PriorityLevelConfigurationLister + plInformerSynced cache.InformerSynced + + fsLister flowcontrollister.FlowSchemaLister + fsInformerSynced cache.InformerSynced + + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface + + // serverConcurrencyLimit is the limit on the server's total + // number of non-exempt requests being served at once. This comes + // from server configuration. + serverConcurrencyLimit int + + // requestWaitLimit comes from server configuration. + requestWaitLimit time.Duration + + // This must be locked while accessing flowSchemas or + // priorityLevelStates. It is the lock involved in + // LockingWriteMultiple. + lock sync.Mutex + + // flowSchemas holds the flow schema objects, sorted by increasing + // numerical (decreasing logical) matching precedence. Every + // FlowSchema in this slice is immutable. + flowSchemas apihelpers.FlowSchemaSequence + + // priorityLevelStates maps the PriorityLevelConfiguration object + // name to the state for that level. Every name referenced from a + // member of `flowSchemas` has an entry here. + priorityLevelStates map[string]*priorityLevelState +} + +// priorityLevelState holds the state specific to a priority level. +type priorityLevelState struct { + // the API object or prototype prescribing this level. Nothing + // reached through this pointer is mutable. + pl *flowcontrol.PriorityLevelConfiguration + + // qsCompleter holds the QueueSetCompleter derived from `config` + // and `queues` if config is not exempt, nil otherwise. + qsCompleter fq.QueueSetCompleter + + // The QueueSet for this priority level. This is nil if and only + // if the priority level is exempt. + queues fq.QueueSet + + // quiescing==true indicates that this priority level should be + // removed when its queues have all drained. May be true only if + // queues is non-nil. 
+ quiescing bool + + // number of goroutines between Controller::Match and calling the + // returned StartFunction + numPending int + + // Observers tracking number waiting, executing + obsPair metrics.TimedObserverPair +} + +// NewTestableController is extra flexible to facilitate testing +func newTestableController( + informerFactory kubeinformers.SharedInformerFactory, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface, + serverConcurrencyLimit int, + requestWaitLimit time.Duration, + obsPairGenerator metrics.TimedObserverPairGenerator, + queueSetFactory fq.QueueSetFactory, +) *configController { + cfgCtlr := &configController{ + queueSetFactory: queueSetFactory, + obsPairGenerator: obsPairGenerator, + serverConcurrencyLimit: serverConcurrencyLimit, + requestWaitLimit: requestWaitLimit, + flowcontrolClient: flowcontrolClient, + priorityLevelStates: make(map[string]*priorityLevelState), + } + klog.V(2).Infof("NewTestableController with serverConcurrencyLimit=%d, requestWaitLimit=%s", serverConcurrencyLimit, requestWaitLimit) + cfgCtlr.initializeConfigController(informerFactory) + // ensure the data structure reflects the mandatory config + cfgCtlr.lockAndDigestConfigObjects(nil, nil) + return cfgCtlr +} + +// initializeConfigController sets up the controller that processes +// config API objects. +func (cfgCtlr *configController) initializeConfigController(informerFactory kubeinformers.SharedInformerFactory) { + cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") + fci := informerFactory.Flowcontrol().V1beta1() + pli := fci.PriorityLevelConfigurations() + fsi := fci.FlowSchemas() + cfgCtlr.plLister = pli.Lister() + cfgCtlr.plInformerSynced = pli.Informer().HasSynced + cfgCtlr.fsLister = fsi.Lister() + cfgCtlr.fsInformerSynced = fsi.Informer().HasSynced + pli.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + pl := obj.(*flowcontrol.PriorityLevelConfiguration) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to creation of PLC %s", pl.Name) + cfgCtlr.configQueue.Add(0) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + newPL := newObj.(*flowcontrol.PriorityLevelConfiguration) + oldPL := oldObj.(*flowcontrol.PriorityLevelConfiguration) + if !apiequality.Semantic.DeepEqual(oldPL.Spec, newPL.Spec) { + klog.V(7).Infof("Triggered API priority and fairness config reloading due to spec update of PLC %s", newPL.Name) + cfgCtlr.configQueue.Add(0) + } + }, + DeleteFunc: func(obj interface{}) { + name, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to deletion of PLC %s", name) + cfgCtlr.configQueue.Add(0) + + }}) + fsi.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + fs := obj.(*flowcontrol.FlowSchema) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to creation of FS %s", fs.Name) + cfgCtlr.configQueue.Add(0) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + newFS := newObj.(*flowcontrol.FlowSchema) + oldFS := oldObj.(*flowcontrol.FlowSchema) + if !apiequality.Semantic.DeepEqual(oldFS.Spec, newFS.Spec) { + klog.V(7).Infof("Triggered API priority and fairness config reloading due to spec update of FS %s", newFS.Name) + cfgCtlr.configQueue.Add(0) + } + }, + DeleteFunc: func(obj interface{}) { + name, _ := 
cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to deletion of FS %s", name) + cfgCtlr.configQueue.Add(0) + + }}) +} + +// MaintainObservations keeps the observers from +// metrics.PriorityLevelConcurrencyObserverPairGenerator from falling +// too far behind +func (cfgCtlr *configController) MaintainObservations(stopCh <-chan struct{}) { + wait.Until(cfgCtlr.updateObservations, 10*time.Second, stopCh) +} + +func (cfgCtlr *configController) updateObservations() { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + for _, plc := range cfgCtlr.priorityLevelStates { + if plc.queues != nil { + plc.queues.UpdateObservations() + } + } +} + +func (cfgCtlr *configController) Run(stopCh <-chan struct{}) error { + defer cfgCtlr.configQueue.ShutDown() + klog.Info("Starting API Priority and Fairness config controller") + if ok := cache.WaitForCacheSync(stopCh, cfgCtlr.plInformerSynced, cfgCtlr.fsInformerSynced); !ok { + return fmt.Errorf("Never achieved initial sync") + } + klog.Info("Running API Priority and Fairness config worker") + wait.Until(cfgCtlr.runWorker, time.Second, stopCh) + klog.Info("Shutting down API Priority and Fairness config worker") + return nil +} + +func (cfgCtlr *configController) runWorker() { + for cfgCtlr.processNextWorkItem() { + } +} + +func (cfgCtlr *configController) processNextWorkItem() bool { + obj, shutdown := cfgCtlr.configQueue.Get() + if shutdown { + return false + } + + func(obj interface{}) { + defer cfgCtlr.configQueue.Done(obj) + if !cfgCtlr.syncOne() { + cfgCtlr.configQueue.AddRateLimited(obj) + } else { + cfgCtlr.configQueue.Forget(obj) + } + }(obj) + + return true +} + +// syncOne attempts to sync all the API Priority and Fairness config +// objects. It either succeeds and returns `true` or logs an error +// and returns `false`. +func (cfgCtlr *configController) syncOne() bool { + all := labels.Everything() + newPLs, err := cfgCtlr.plLister.List(all) + if err != nil { + klog.Errorf("Unable to list PriorityLevelConfiguration objects: %s", err.Error()) + return false + } + newFSs, err := cfgCtlr.fsLister.List(all) + if err != nil { + klog.Errorf("Unable to list FlowSchema objects: %s", err.Error()) + return false + } + err = cfgCtlr.digestConfigObjects(newPLs, newFSs) + if err == nil { + return true + } + klog.Error(err) + return false +} + +// cfgMeal is the data involved in the process of digesting the API +// objects that configure API Priority and Fairness. All the config +// objects are digested together, because this is the simplest way to +// cope with the various dependencies between objects. The process of +// digestion is done in four passes over config objects --- three +// passes over PriorityLevelConfigurations and one pass over the +// FlowSchemas --- with the work divided among the passes according to +// those dependencies. +type cfgMeal struct { + cfgCtlr *configController + + newPLStates map[string]*priorityLevelState + + // The sum of the concurrency shares of the priority levels in the + // new configuration + shareSum float64 + + // These keep track of which mandatory priority level config + // objects have been digested + haveExemptPL, haveCatchAllPL bool + + // Buffered FlowSchema status updates to do. Do them when the + // lock is not held, to avoid a deadlock due to such a request + // provoking a call into this controller while the lock held + // waiting on that request to complete.
+ fsStatusUpdates []fsStatusUpdate +} + +// A buffered set of status updates for a FlowSchema +type fsStatusUpdate struct { + flowSchema *flowcontrol.FlowSchema + condition flowcontrol.FlowSchemaCondition + oldValue flowcontrol.FlowSchemaCondition +} + +// digestConfigObjects is given all the API objects that configure +// cfgCtlr and writes its consequent new configState. +func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.PriorityLevelConfiguration, newFSs []*flowcontrol.FlowSchema) error { + fsStatusUpdates := cfgCtlr.lockAndDigestConfigObjects(newPLs, newFSs) + var errs []error + for _, fsu := range fsStatusUpdates { + enc, err := json.Marshal(fsu.condition) + if err != nil { + // should never happen because these conditions are created here and well formed + panic(fmt.Sprintf("Failed to json.Marshall(%#+v): %s", fsu.condition, err.Error())) + } + klog.V(4).Infof("Writing Condition %s to FlowSchema %s because its previous value was %s", string(enc), fsu.flowSchema.Name, fcfmt.Fmt(fsu.oldValue)) + _, err = cfgCtlr.flowcontrolClient.FlowSchemas().Patch(context.TODO(), fsu.flowSchema.Name, apitypes.StrategicMergePatchType, []byte(fmt.Sprintf(`{"status": {"conditions": [ %s ] } }`, string(enc))), metav1.PatchOptions{FieldManager: "api-priority-and-fairness-config-consumer-v1"}, "status") + if err != nil { + errs = append(errs, errors.Wrap(err, fmt.Sprintf("failed to set a status.condition for FlowSchema %s", fsu.flowSchema.Name))) + } + } + if len(errs) == 0 { + return nil + } + return apierrors.NewAggregate(errs) +} + +func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontrol.PriorityLevelConfiguration, newFSs []*flowcontrol.FlowSchema) []fsStatusUpdate { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + meal := cfgMeal{ + cfgCtlr: cfgCtlr, + newPLStates: make(map[string]*priorityLevelState), + } + + meal.digestNewPLsLocked(newPLs) + meal.digestFlowSchemasLocked(newFSs) + meal.processOldPLsLocked() + + // Supply missing mandatory PriorityLevelConfiguration objects + if !meal.haveExemptPL { + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit) + } + if !meal.haveCatchAllPL { + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit) + } + + meal.finishQueueSetReconfigsLocked() + + // The new config has been constructed + cfgCtlr.priorityLevelStates = meal.newPLStates + klog.V(5).Infof("Switched to new API Priority and Fairness configuration") + return meal.fsStatusUpdates +} + +// Digest the new set of PriorityLevelConfiguration objects. +// Pretend broken ones do not exist. 
+func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfiguration) { + for _, pl := range newPLs { + state := meal.cfgCtlr.priorityLevelStates[pl.Name] + if state == nil { + state = &priorityLevelState{obsPair: meal.cfgCtlr.obsPairGenerator.Generate(1, 1, []string{pl.Name})} + } + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, pl, meal.cfgCtlr.requestWaitLimit, state.obsPair) + if err != nil { + klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err) + continue + } + meal.newPLStates[pl.Name] = state + state.pl = pl + state.qsCompleter = qsCompleter + if state.quiescing { // it was undesired, but no longer + klog.V(3).Infof("Priority level %q was undesired and has become desired again", pl.Name) + state.quiescing = false + } + if state.pl.Spec.Limited != nil { + meal.shareSum += float64(state.pl.Spec.Limited.AssuredConcurrencyShares) + } + meal.haveExemptPL = meal.haveExemptPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt + meal.haveCatchAllPL = meal.haveCatchAllPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameCatchAll + } +} + +// Digest the given FlowSchema objects. Ones that reference a missing +// or broken priority level are not to be passed on to the filter for +// use. We do this before holding over old priority levels so that +// requests stop going to those levels and FlowSchemaStatus values +// reflect this. This function also adds any missing mandatory +// FlowSchema objects. The given objects must all have distinct +// names. +func (meal *cfgMeal) digestFlowSchemasLocked(newFSs []*flowcontrol.FlowSchema) { + fsSeq := make(apihelpers.FlowSchemaSequence, 0, len(newFSs)) + fsMap := make(map[string]*flowcontrol.FlowSchema, len(newFSs)) + var haveExemptFS, haveCatchAllFS bool + for i, fs := range newFSs { + otherFS := fsMap[fs.Name] + if otherFS != nil { + // This client is forbidden to do this. + panic(fmt.Sprintf("Given two FlowSchema objects with the same name: %s and %s", fcfmt.Fmt(otherFS), fcfmt.Fmt(fs))) + } + fsMap[fs.Name] = fs + _, goodPriorityRef := meal.newPLStates[fs.Spec.PriorityLevelConfiguration.Name] + + // Ensure the object's status reflects whether its priority + // level reference is broken. + // + // TODO: consider not even trying if server is not handling + // requests yet. + meal.presyncFlowSchemaStatus(fs, !goodPriorityRef, fs.Spec.PriorityLevelConfiguration.Name) + + if !goodPriorityRef { + klog.V(6).Infof("Ignoring FlowSchema %s because of bad priority level reference %q", fs.Name, fs.Spec.PriorityLevelConfiguration.Name) + continue + } + fsSeq = append(fsSeq, newFSs[i]) + haveExemptFS = haveExemptFS || fs.Name == flowcontrol.FlowSchemaNameExempt + haveCatchAllFS = haveCatchAllFS || fs.Name == flowcontrol.FlowSchemaNameCatchAll + } + // sort into the order to be used for matching + sort.Sort(fsSeq) + + // Supply missing mandatory FlowSchemas, in correct position + if !haveExemptFS { + fsSeq = append(apihelpers.FlowSchemaSequence{fcboot.MandatoryFlowSchemaExempt}, fsSeq...) + } + if !haveCatchAllFS { + fsSeq = append(fsSeq, fcboot.MandatoryFlowSchemaCatchAll) + } + + meal.cfgCtlr.flowSchemas = fsSeq + if klog.V(5).Enabled() { + for _, fs := range fsSeq { + klog.Infof("Using FlowSchema %s", fcfmt.Fmt(fs)) + } + } +} + +// Consider all the priority levels in the previous configuration. 
+// Keep the ones that are in the new config, supply mandatory +// behavior, or are still busy; for the rest: drop it if it has no +// queues, otherwise start the quiescing process if that has not +// already been started. +func (meal *cfgMeal) processOldPLsLocked() { + for plName, plState := range meal.cfgCtlr.priorityLevelStates { + if meal.newPLStates[plName] != nil { + // Still desired and already updated + continue + } + if plName == flowcontrol.PriorityLevelConfigurationNameExempt && !meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll && !meal.haveCatchAllPL { + // BTW, we know the Spec has not changed because the + // mandatory objects have immutable Specs + klog.V(3).Infof("Retaining mandatory priority level %q despite lack of API object", plName) + } else { + if plState.queues == nil || plState.numPending == 0 && plState.queues.IsIdle() { + // Either there are no queues or they are done + // draining and no use is coming from another + // goroutine + klog.V(3).Infof("Removing undesired priority level %q (nilQueues=%v), Type=%v", plName, plState.queues == nil, plState.pl.Spec.Type) + continue + } + if !plState.quiescing { + klog.V(3).Infof("Priority level %q became undesired", plName) + plState.quiescing = true + } + } + var err error + plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, plState.pl, meal.cfgCtlr.requestWaitLimit, plState.obsPair) + if err != nil { + // This can not happen because queueSetCompleterForPL already approved this config + panic(fmt.Sprintf("%s from name=%q spec=%s", err, plName, fcfmt.Fmt(plState.pl.Spec))) + } + if plState.pl.Spec.Limited != nil { + // We deliberately include the lingering priority levels + // here so that their queues get some concurrency and they + // continue to drain. During this interim a lingering + // priority level continues to get a concurrency + // allocation determined by all the share values in the + // regular way. + meal.shareSum += float64(plState.pl.Spec.Limited.AssuredConcurrencyShares) + } + meal.haveExemptPL = meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameExempt + meal.haveCatchAllPL = meal.haveCatchAllPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll + meal.newPLStates[plName] = plState + } +} + +// For all the priority levels of the new config, divide up the +// server's total concurrency limit among them and create/update their +// QueueSets. +func (meal *cfgMeal) finishQueueSetReconfigsLocked() { + for plName, plState := range meal.newPLStates { + if plState.pl.Spec.Limited == nil { + klog.V(5).Infof("Using exempt priority level %q: quiescing=%v", plName, plState.quiescing) + continue + } + + // The use of math.Ceil here means that the results might sum + // to a little more than serverConcurrencyLimit but the + // difference will be negligible. 
+ concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(plState.pl.Spec.Limited.AssuredConcurrencyShares) / meal.shareSum)) + metrics.UpdateSharedConcurrencyLimit(plName, concurrencyLimit) + + if plState.queues == nil { + klog.V(5).Infof("Introducing queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum) + } else { + klog.V(5).Infof("Retaining queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v, numPending=%d (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.numPending, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum) + } + plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: concurrencyLimit}) + } +} + +// queueSetCompleterForPL returns an appropriate QueueSetCompleter for the +// given priority level configuration. Returns nil if that config +// does not call for limiting. Returns nil and an error if the given +// object is malformed in a way that is a problem for this package. +func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, intPair metrics.TimedObserverPair) (fq.QueueSetCompleter, error) { + if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt) != (pl.Spec.Limited == nil) { + return nil, errors.New("broken union structure at the top") + } + if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt) != (pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt) { + // This package does not attempt to cope with a priority level dynamically switching between exempt and not. 
+ return nil, errors.New("non-alignment between name and type") + } + if pl.Spec.Limited == nil { + return nil, nil + } + if (pl.Spec.Limited.LimitResponse.Type == flowcontrol.LimitResponseTypeReject) != (pl.Spec.Limited.LimitResponse.Queuing == nil) { + return nil, errors.New("broken union structure for limit response") + } + qcAPI := pl.Spec.Limited.LimitResponse.Queuing + qcQS := fq.QueuingConfig{Name: pl.Name} + if qcAPI != nil { + qcQS = fq.QueuingConfig{Name: pl.Name, + DesiredNumQueues: int(qcAPI.Queues), + QueueLengthLimit: int(qcAPI.QueueLengthLimit), + HandSize: int(qcAPI.HandSize), + RequestWaitLimit: requestWaitLimit, + } + } + var qsc fq.QueueSetCompleter + var err error + if queues != nil { + qsc, err = queues.BeginConfigChange(qcQS) + } else { + qsc, err = qsf.BeginConstruction(qcQS, intPair) + } + if err != nil { + err = errors.Wrap(err, fmt.Sprintf("priority level %q has QueuingConfiguration %#+v, which is invalid", pl.Name, qcAPI)) + } + return qsc, err +} + +func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangling bool, plName string) { + danglingCondition := apihelpers.GetFlowSchemaConditionByType(fs, flowcontrol.FlowSchemaConditionDangling) + if danglingCondition == nil { + danglingCondition = &flowcontrol.FlowSchemaCondition{ + Type: flowcontrol.FlowSchemaConditionDangling, + } + } + desiredStatus := flowcontrol.ConditionFalse + var desiredReason, desiredMessage string + if isDangling { + desiredStatus = flowcontrol.ConditionTrue + desiredReason = "NotFound" + desiredMessage = fmt.Sprintf("This FlowSchema references the PriorityLevelConfiguration object named %q but there is no such object", plName) + } else { + desiredReason = "Found" + desiredMessage = fmt.Sprintf("This FlowSchema references the PriorityLevelConfiguration object named %q and it exists", plName) + } + if danglingCondition.Status == desiredStatus && danglingCondition.Reason == desiredReason && danglingCondition.Message == desiredMessage { + return + } + meal.fsStatusUpdates = append(meal.fsStatusUpdates, fsStatusUpdate{ + flowSchema: fs, + condition: flowcontrol.FlowSchemaCondition{ + Type: flowcontrol.FlowSchemaConditionDangling, + Status: desiredStatus, + LastTransitionTime: metav1.Now(), + Reason: desiredReason, + Message: desiredMessage, + }, + oldValue: *danglingCondition}) +} + +// imaginePL adds a priority level based on one of the mandatory ones +// that does not actually exist (right now) as a real API object. +func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) { + klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name) + obsPair := meal.cfgCtlr.obsPairGenerator.Generate(1, 1, []string{proto.Name}) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, requestWaitLimit, obsPair) + if err != nil { + // This can not happen because proto is one of the mandatory + // objects and these are not erroneous + panic(err) + } + meal.newPLStates[proto.Name] = &priorityLevelState{ + pl: proto, + qsCompleter: qsCompleter, + obsPair: obsPair, + } + if proto.Spec.Limited != nil { + meal.shareSum += float64(proto.Spec.Limited.AssuredConcurrencyShares) + } + return +} + +type immediateRequest struct{} + +func (immediateRequest) Finish(execute func()) bool { + execute() + return false +} + +// startRequest classifies and, if appropriate, enqueues the request. +// Returns a nil Request if and only if the request is to be rejected. 
+// The returned bool indicates whether the request is exempt from +// limitation. The startWaitingTime is when the request started +// waiting in its queue, or `Time{}` if this did not happen. +func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDigest, queueNoteFn fq.QueueNoteFn) (fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, isExempt bool, req fq.Request, startWaitingTime time.Time) { + klog.V(7).Infof("startRequest(%#+v)", rd) + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + var selectedFlowSchema *flowcontrol.FlowSchema + for _, fs := range cfgCtlr.flowSchemas { + if matchesFlowSchema(rd, fs) { + selectedFlowSchema = fs + break + } + } + if selectedFlowSchema == nil { + // This should never happen. If the requestDigest's User is a part of + // system:authenticated or system:unauthenticated, the catch-all flow + // schema should match it. However, if that invariant somehow fails, + // fallback to the catch-all flow schema anyway. + for _, fs := range cfgCtlr.flowSchemas { + if fs.Name == flowcontrol.FlowSchemaNameCatchAll { + selectedFlowSchema = fs + break + } + } + if selectedFlowSchema == nil { + // This should absolutely never, ever happen! APF guarantees two + // undeletable flow schemas at all times: an exempt flow schema and a + // catch-all flow schema. + panic(fmt.Sprintf("no fallback catch-all flow schema found for request %#+v and user %#+v", rd.RequestInfo, rd.User)) + } + klog.Warningf("no match found for request %#+v and user %#+v; selecting catchAll=%s as fallback flow schema", rd.RequestInfo, rd.User, fcfmt.Fmt(selectedFlowSchema)) + } + plName := selectedFlowSchema.Spec.PriorityLevelConfiguration.Name + plState := cfgCtlr.priorityLevelStates[plName] + if plState.pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt { + klog.V(7).Infof("startRequest(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, immediate", rd, selectedFlowSchema.Name, selectedFlowSchema.Spec.DistinguisherMethod, plName) + return selectedFlowSchema, plState.pl, true, immediateRequest{}, time.Time{} + } + var numQueues int32 + if plState.pl.Spec.Limited.LimitResponse.Type == flowcontrol.LimitResponseTypeQueue { + numQueues = plState.pl.Spec.Limited.LimitResponse.Queuing.Queues + } + var flowDistinguisher string + var hashValue uint64 + if numQueues > 1 { + flowDistinguisher = computeFlowDistinguisher(rd, selectedFlowSchema.Spec.DistinguisherMethod) + hashValue = hashFlowID(selectedFlowSchema.Name, flowDistinguisher) + } + startWaitingTime = time.Now() + klog.V(7).Infof("startRequest(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, numQueues=%d", rd, selectedFlowSchema.Name, selectedFlowSchema.Spec.DistinguisherMethod, plName, numQueues) + req, idle := plState.queues.StartRequest(ctx, hashValue, flowDistinguisher, selectedFlowSchema.Name, rd.RequestInfo, rd.User, queueNoteFn) + if idle { + cfgCtlr.maybeReapLocked(plName, plState) + } + return selectedFlowSchema, plState.pl, false, req, startWaitingTime +} + +// Call this after getting a clue that the given priority level is undesired and idle +func (cfgCtlr *configController) maybeReap(plName string) { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + plState := cfgCtlr.priorityLevelStates[plName] + if plState == nil { + klog.V(7).Infof("plName=%s, plState==nil", plName) + return + } + if plState.queues != nil { + useless := plState.quiescing && plState.numPending == 0 && plState.queues.IsIdle() + klog.V(7).Infof("plState.quiescing=%v, plState.numPending=%d, useless=%v", plState.quiescing, 
plState.numPending, useless) + if !useless { + return + } + } + klog.V(3).Infof("Triggered API priority and fairness config reloading because priority level %s is undesired and idle", plName) + cfgCtlr.configQueue.Add(0) +} + +// Call this if both (1) plState.queues is non-nil and reported being +// idle, and (2) cfgCtlr's lock has not been released since then. +func (cfgCtlr *configController) maybeReapLocked(plName string, plState *priorityLevelState) { + if !(plState.quiescing && plState.numPending == 0) { + return + } + klog.V(3).Infof("Triggered API priority and fairness config reloading because priority level %s is undesired and idle", plName) + cfgCtlr.configQueue.Add(0) +} + +// computeFlowDistinguisher extracts the flow distinguisher according to the given method +func computeFlowDistinguisher(rd RequestDigest, method *flowcontrol.FlowDistinguisherMethod) string { + if method == nil { + return "" + } + switch method.Type { + case flowcontrol.FlowDistinguisherMethodByUserType: + return rd.User.GetName() + case flowcontrol.FlowDistinguisherMethodByNamespaceType: + return rd.RequestInfo.Namespace + default: + // this line shall never reach + panic("invalid flow-distinguisher method") + } +} + +func hashFlowID(fsName, fDistinguisher string) uint64 { + hash := sha256.New() + var sep = [1]byte{0} + hash.Write([]byte(fsName)) + hash.Write(sep[:]) + hash.Write([]byte(fDistinguisher)) + var sum [32]byte + hash.Sum(sum[:0]) + return binary.LittleEndian.Uint64(sum[:8]) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go new file mode 100644 index 0000000000..3c2c9fc744 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go @@ -0,0 +1,268 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "text/tabwriter" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/server/mux" +) + +const ( + queryIncludeRequestDetails = "includeRequestDetails" +) + +func (cfgCtlr *configController) Install(c *mux.PathRecorderMux) { + // TODO(yue9944882): handle "Accept" header properly + // debugging dumps a CSV content for three levels of granularity + // 1. row per priority-level + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_priority_levels", cfgCtlr.dumpPriorityLevels) + // 2. row per queue + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_queues", cfgCtlr.dumpQueues) + // 3. 
row per request + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_requests", cfgCtlr.dumpRequests) +} + +func (cfgCtlr *configController) dumpPriorityLevels(w http.ResponseWriter, r *http.Request) { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0) + columnHeaders := []string{ + "PriorityLevelName", // 1 + "ActiveQueues", // 2 + "IsIdle", // 3 + "IsQuiescing", // 4 + "WaitingRequests", // 5 + "ExecutingRequests", // 6 + } + tabPrint(tabWriter, rowForHeaders(columnHeaders)) + endLine(tabWriter) + for _, plState := range cfgCtlr.priorityLevelStates { + if plState.queues == nil { + tabPrint(tabWriter, row( + plState.pl.Name, // 1 + "", // 2 + "", // 3 + "", // 4 + "", // 5 + "", // 6 + )) + endLine(tabWriter) + continue + } + queueSetDigest := plState.queues.Dump(false) + activeQueueNum := 0 + for _, q := range queueSetDigest.Queues { + if len(q.Requests) > 0 { + activeQueueNum++ + } + } + + tabPrint(tabWriter, rowForPriorityLevel( + plState.pl.Name, // 1 + activeQueueNum, // 2 + plState.queues.IsIdle(), // 3 + plState.quiescing, // 4 + queueSetDigest.Waiting, // 5 + queueSetDigest.Executing, // 6 + )) + endLine(tabWriter) + } + runtime.HandleError(tabWriter.Flush()) +} + +func (cfgCtlr *configController) dumpQueues(w http.ResponseWriter, r *http.Request) { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0) + columnHeaders := []string{ + "PriorityLevelName", // 1 + "Index", // 2 + "PendingRequests", // 3 + "ExecutingRequests", // 4 + "VirtualStart", // 5 + } + tabPrint(tabWriter, rowForHeaders(columnHeaders)) + endLine(tabWriter) + for _, plState := range cfgCtlr.priorityLevelStates { + if plState.queues == nil { + tabPrint(tabWriter, row( + plState.pl.Name, // 1 + "", // 2 + "", // 3 + "", // 4 + "", // 5 + )) + endLine(tabWriter) + continue + } + queueSetDigest := plState.queues.Dump(false) + for i, q := range queueSetDigest.Queues { + tabPrint(tabWriter, rowForQueue( + plState.pl.Name, // 1 + i, // 2 + len(q.Requests), // 3 + q.ExecutingRequests, // 4 + q.VirtualStart, // 5 + )) + endLine(tabWriter) + } + } + runtime.HandleError(tabWriter.Flush()) +} + +func (cfgCtlr *configController) dumpRequests(w http.ResponseWriter, r *http.Request) { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + + includeRequestDetails := len(r.URL.Query().Get(queryIncludeRequestDetails)) > 0 + + tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0) + tabPrint(tabWriter, rowForHeaders([]string{ + "PriorityLevelName", // 1 + "FlowSchemaName", // 2 + "QueueIndex", // 3 + "RequestIndexInQueue", // 4 + "FlowDistingsher", // 5 + "ArriveTime", // 6 + })) + if includeRequestDetails { + continueLine(tabWriter) + tabPrint(tabWriter, rowForHeaders([]string{ + "UserName", // 7 + "Verb", // 8 + "APIPath", // 9 + "Namespace", // 10 + "Name", // 11 + "APIVersion", // 12 + "Resource", // 13 + "SubResource", // 14 + })) + } + endLine(tabWriter) + for _, plState := range cfgCtlr.priorityLevelStates { + if plState.queues == nil { + continue + } + queueSetDigest := plState.queues.Dump(includeRequestDetails) + for iq, q := range queueSetDigest.Queues { + for ir, r := range q.Requests { + tabPrint(tabWriter, rowForRequest( + plState.pl.Name, // 1 + r.MatchedFlowSchema, // 2 + iq, // 3 + ir, // 4 + r.FlowDistinguisher, // 5 + r.ArriveTime, // 6 + )) + if includeRequestDetails { + continueLine(tabWriter) + tabPrint(tabWriter, rowForRequestDetails( + r.UserName, // 7 + r.RequestInfo.Verb, // 8 + 
r.RequestInfo.Path, // 9 + r.RequestInfo.Namespace, // 10 + r.RequestInfo.Name, // 11 + schema.GroupVersion{ + Group: r.RequestInfo.APIGroup, + Version: r.RequestInfo.APIVersion, + }.String(), // 12 + r.RequestInfo.Resource, // 13 + r.RequestInfo.Subresource, // 14 + )) + } + endLine(tabWriter) + } + } + } + runtime.HandleError(tabWriter.Flush()) +} + +func tabPrint(w io.Writer, row string) { + _, err := fmt.Fprint(w, row) + runtime.HandleError(err) +} + +func continueLine(w io.Writer) { + _, err := fmt.Fprint(w, ",\t") + runtime.HandleError(err) +} +func endLine(w io.Writer) { + _, err := fmt.Fprint(w, "\n") + runtime.HandleError(err) +} + +func rowForHeaders(headers []string) string { + return row(headers...) +} + +func rowForPriorityLevel(plName string, activeQueues int, isIdle, isQuiescing bool, waitingRequests, executingRequests int) string { + return row( + plName, + strconv.Itoa(activeQueues), + strconv.FormatBool(isIdle), + strconv.FormatBool(isQuiescing), + strconv.Itoa(waitingRequests), + strconv.Itoa(executingRequests), + ) +} + +func rowForQueue(plName string, index, waitingRequests, executingRequests int, virtualStart float64) string { + return row( + plName, + strconv.Itoa(index), + strconv.Itoa(waitingRequests), + strconv.Itoa(executingRequests), + fmt.Sprintf("%.4f", virtualStart), + ) +} + +func rowForRequest(plName, fsName string, queueIndex, requestIndex int, flowDistinguisher string, arriveTime time.Time) string { + return row( + plName, + fsName, + strconv.Itoa(queueIndex), + strconv.Itoa(requestIndex), + flowDistinguisher, + arriveTime.UTC().Format(time.RFC3339Nano), + ) +} + +func rowForRequestDetails(username, verb, path, namespace, name, apiVersion, resource, subResource string) string { + return row( + username, + verb, + path, + namespace, + name, + apiVersion, + resource, + subResource, + ) +} + +func row(columns ...string) string { + return strings.Join(columns, ",\t") +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go new file mode 100644 index 0000000000..327d50c148 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -0,0 +1,132 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "context" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/server/mux" + "k8s.io/apiserver/pkg/util/flowcontrol/counter" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + fqs "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/klog/v2" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" +) + +// Interface defines how the API Priority and Fairness filter interacts with the underlying system. 
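+//
+// A typical invocation, sketched here for illustration only (the names
+// `apf` and `digest` and the callback bodies are placeholders, not
+// taken from this package):
+//
+//	apf.Handle(ctx, digest,
+//		func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration) {
+//			// record how the request was classified
+//		},
+//		func(inQueue bool) {
+//			// note when the request enters or leaves its queue
+//		},
+//		func() {
+//			// actually serve the request
+//		})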
+type Interface interface { + // Handle takes care of queuing and dispatching a request + // characterized by the given digest. The given `noteFn` will be + // invoked with the results of request classification. If the + // request is queued then `queueNoteFn` will be called twice, + // first with `true` and then with `false`; otherwise + // `queueNoteFn` will not be called at all. If Handle decides + // that the request should be executed then `execute()` will be + // invoked once to execute the request; otherwise `execute()` will + // not be invoked. + Handle(ctx context.Context, + requestDigest RequestDigest, + noteFn func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration), + queueNoteFn fq.QueueNoteFn, + execFn func(), + ) + + // MaintainObservations is a helper for maintaining statistics. + MaintainObservations(stopCh <-chan struct{}) + + // Run monitors config objects from the main apiservers and causes + // any needed changes to local behavior. This method ceases + // activity and returns after the given channel is closed. + Run(stopCh <-chan struct{}) error + + // Install installs debugging endpoints to the web-server. + Install(c *mux.PathRecorderMux) +} + +// This request filter implements https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md + +// New creates a new instance to implement API priority and fairness +func New( + informerFactory kubeinformers.SharedInformerFactory, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface, + serverConcurrencyLimit int, + requestWaitLimit time.Duration, +) Interface { + grc := counter.NoOp{} + return NewTestable( + informerFactory, + flowcontrolClient, + serverConcurrencyLimit, + requestWaitLimit, + metrics.PriorityLevelConcurrencyObserverPairGenerator, + fqs.NewQueueSetFactory(&clock.RealClock{}, grc), + ) +} + +// NewTestable is extra flexible to facilitate testing +func NewTestable( + informerFactory kubeinformers.SharedInformerFactory, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface, + serverConcurrencyLimit int, + requestWaitLimit time.Duration, + obsPairGenerator metrics.TimedObserverPairGenerator, + queueSetFactory fq.QueueSetFactory, +) Interface { + return newTestableController(informerFactory, flowcontrolClient, serverConcurrencyLimit, requestWaitLimit, obsPairGenerator, queueSetFactory) +} + +func (cfgCtlr *configController) Handle(ctx context.Context, requestDigest RequestDigest, + noteFn func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration), + queueNoteFn fq.QueueNoteFn, + execFn func()) { + fs, pl, isExempt, req, startWaitingTime := cfgCtlr.startRequest(ctx, requestDigest, queueNoteFn) + queued := startWaitingTime != time.Time{} + noteFn(fs, pl) + if req == nil { + if queued { + metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime)) + } + klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, reject", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt) + return + } + klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued) + var executed bool + idle := req.Finish(func() { + if queued { + metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime)) + } + metrics.AddDispatch(pl.Name, fs.Name) + executed = true + 
startExecutionTime := time.Now() + execFn() + metrics.ObserveExecutionDuration(pl.Name, fs.Name, time.Since(startExecutionTime)) + }) + if queued && !executed { + metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime)) + } + klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v, Finish() => idle=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued, idle) + if idle { + cfgCtlr.maybeReap(pl.Name) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go new file mode 100644 index 0000000000..0418e1217a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package counter + +// GoRoutineCounter keeps track of the number of active goroutines +// working on/for something. This is a utility that makes such code more +// testable. The code uses this utility to report the number of active +// goroutines to the test code, so that the test code can advance a fake +// clock when and only when the code being tested has finished all +// the work that is ready to do at the present time. +type GoRoutineCounter interface { + // Add adds the given delta to the count of active goroutines. + // Call Add(1) before forking a goroutine, Add(-1) at the end of that goroutine. + // Call Add(-1) just before waiting on something from another goroutine (e.g., + // just before a `select`). + // Call Add(1) just before doing something that unblocks a goroutine that is + // waiting on that something. + Add(delta int) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go new file mode 100644 index 0000000000..fa946f6f08 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package counter + +// NoOp is a GoRoutineCounter that does not actually count +type NoOp struct{} + +var _ GoRoutineCounter = NoOp{} + +// Add would adjust the count, if a count were being kept +func (NoOp) Add(int) {} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go new file mode 100644 index 0000000000..5e44676491 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "time" + + "k8s.io/apiserver/pkg/endpoints/request" +) + +// QueueSetDump is an instant dump of a queue-set. +type QueueSetDump struct { + Queues []QueueDump + Waiting int + Executing int +} + +// QueueDump is an instant dump of one queue in a queue-set. +type QueueDump struct { + Requests []RequestDump + VirtualStart float64 + ExecutingRequests int +} + +// RequestDump is an instant dump of one request pending in the queue. +type RequestDump struct { + MatchedFlowSchema string + FlowDistinguisher string + ArriveTime time.Time + StartTime time.Time + // request details + UserName string + RequestInfo request.RequestInfo +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go new file mode 100644 index 0000000000..dcba6f2c2e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fairqueuing + +import ( + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" +) + +// Integrator computes the moments of some variable X over time as +// read from a particular clock. The integrals start when the +// Integrator is created, and end at the latest operation on the +// Integrator. As a `metrics.TimedObserver` this fixes X1=1 and +// ignores attempts to change X1.
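+//
+// A minimal usage sketch (the clock and the values below are
+// illustrative, not taken from this package's callers):
+//
+//	igr := NewIntegrator(clock.RealClock{})
+//	igr.Set(3)             // X is 3 from now on
+//	// ... time passes ...
+//	results := igr.Reset() // Duration, Average, Deviation, Min, Max of X so far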
+type Integrator interface { + metrics.TimedObserver + + GetResults() IntegratorResults + + // Return the results of integrating to now, and reset integration to start now + Reset() IntegratorResults +} + +// IntegratorResults holds statistical abstracts of the integration +type IntegratorResults struct { + Duration float64 //seconds + Average float64 //time-weighted + Deviation float64 //standard deviation: sqrt(avg((value-avg)^2)) + Min, Max float64 +} + +// Equal tests for semantic equality. +// This considers all NaN values to be equal to each other. +func (x *IntegratorResults) Equal(y *IntegratorResults) bool { + return x == y || x != nil && y != nil && x.Duration == y.Duration && x.Min == y.Min && x.Max == y.Max && (x.Average == y.Average || math.IsNaN(x.Average) && math.IsNaN(y.Average)) && (x.Deviation == y.Deviation || math.IsNaN(x.Deviation) && math.IsNaN(y.Deviation)) +} + +type integrator struct { + clock clock.PassiveClock + sync.Mutex + lastTime time.Time + x float64 + moments Moments + min, max float64 +} + +// NewIntegrator makes one that uses the given clock +func NewIntegrator(clock clock.PassiveClock) Integrator { + return &integrator{ + clock: clock, + lastTime: clock.Now(), + } +} + +func (igr *integrator) SetX1(x1 float64) { +} + +func (igr *integrator) Set(x float64) { + igr.Lock() + igr.setLocked(x) + igr.Unlock() +} + +func (igr *integrator) setLocked(x float64) { + igr.updateLocked() + igr.x = x + if x < igr.min { + igr.min = x + } + if x > igr.max { + igr.max = x + } +} + +func (igr *integrator) Add(deltaX float64) { + igr.Lock() + igr.setLocked(igr.x + deltaX) + igr.Unlock() +} + +func (igr *integrator) updateLocked() { + now := igr.clock.Now() + dt := now.Sub(igr.lastTime).Seconds() + igr.lastTime = now + igr.moments = igr.moments.Add(ConstantMoments(dt, igr.x)) +} + +func (igr *integrator) GetResults() IntegratorResults { + igr.Lock() + defer igr.Unlock() + return igr.getResultsLocked() +} + +func (igr *integrator) Reset() IntegratorResults { + igr.Lock() + defer igr.Unlock() + results := igr.getResultsLocked() + igr.moments = Moments{} + igr.min = igr.x + igr.max = igr.x + return results +} + +func (igr *integrator) getResultsLocked() (results IntegratorResults) { + igr.updateLocked() + results.Min, results.Max = igr.min, igr.max + results.Duration = igr.moments.ElapsedSeconds + results.Average, results.Deviation = igr.moments.AvgAndStdDev() + return +} + +// Moments are the integrals of the 0, 1, and 2 powers of some +// variable X over some range of time. 
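+//
+// A worked example (illustrative only): if X is held at 2 for 3
+// seconds, ConstantMoments(3, 2) yields ElapsedSeconds = 3,
+// IntegralX = 2*3 = 6, and IntegralXX = 2*2*3 = 12; AvgAndStdDev on
+// that value returns an average of 6/3 = 2 and a standard deviation
+// of sqrt(12/3 - 2*2) = 0, as expected for a constant.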
+type Moments struct { + ElapsedSeconds float64 // integral of dt + IntegralX float64 // integral of x dt + IntegralXX float64 // integral of x*x dt +} + +// ConstantMoments is for a constant X +func ConstantMoments(dt, x float64) Moments { + return Moments{ + ElapsedSeconds: dt, + IntegralX: x * dt, + IntegralXX: x * x * dt, + } +} + +// Add combines over two ranges of time +func (igr Moments) Add(ogr Moments) Moments { + return Moments{ + ElapsedSeconds: igr.ElapsedSeconds + ogr.ElapsedSeconds, + IntegralX: igr.IntegralX + ogr.IntegralX, + IntegralXX: igr.IntegralXX + ogr.IntegralXX, + } +} + +// Sub finds the difference between a range of time and a subrange +func (igr Moments) Sub(ogr Moments) Moments { + return Moments{ + ElapsedSeconds: igr.ElapsedSeconds - ogr.ElapsedSeconds, + IntegralX: igr.IntegralX - ogr.IntegralX, + IntegralXX: igr.IntegralXX - ogr.IntegralXX, + } +} + +// AvgAndStdDev returns the average and standard devation +func (igr Moments) AvgAndStdDev() (float64, float64) { + if igr.ElapsedSeconds <= 0 { + return math.NaN(), math.NaN() + } + avg := igr.IntegralX / igr.ElapsedSeconds + // standard deviation is sqrt( average( (x - xbar)^2 ) ) + // = sqrt( Integral( x^2 + xbar^2 -2*x*xbar dt ) / Duration ) + // = sqrt( ( Integral( x^2 dt ) + Duration * xbar^2 - 2*xbar*Integral(x dt) ) / Duration) + // = sqrt( Integral(x^2 dt)/Duration - xbar^2 ) + variance := igr.IntegralXX/igr.ElapsedSeconds - avg*avg + if variance >= 0 { + return avg, math.Sqrt(variance) + } + return avg, math.NaN() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go new file mode 100644 index 0000000000..882a505c81 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fairqueuing + +import ( + "context" + "time" + + "k8s.io/apiserver/pkg/util/flowcontrol/debug" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" +) + +// QueueSetFactory is used to create QueueSet objects. Creation, like +// config update, is done in two phases: the first phase consumes the +// QueuingConfig and the second consumes the DispatchingConfig. They +// are separated so that errors from the first phase can be found +// before committing to a concurrency allotment for the second. +type QueueSetFactory interface { + // BeginConstruction does the first phase of creating a QueueSet + BeginConstruction(QueuingConfig, metrics.TimedObserverPair) (QueueSetCompleter, error) +} + +// QueueSetCompleter finishes the two-step process of creating or +// reconfiguring a QueueSet +type QueueSetCompleter interface { + // Complete returns a QueueSet configured by the given + // dispatching configuration. + Complete(DispatchingConfig) QueueSet +} + +// QueueSet is the abstraction for the queuing and dispatching +// functionality of one non-exempt priority level. 
It covers the +// functionality described in the "Assignment to a Queue", "Queuing", +// and "Dispatching" sections of +// https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md +// . Some day we may have connections between priority levels, but +// today is not that day. +type QueueSet interface { + // BeginConfigChange starts the two-step process of updating the + // configuration. No change is made until Complete is called. If + // `C := X.BeginConstruction(q)` then `C.Complete(d)` returns the + // same value `X`. If the QueuingConfig's DesiredNumQueues field + // is zero then the other queuing-specific config parameters are + // not changed, so that the queues continue draining as before. + // In any case, reconfiguration does not discard any queue unless + // and until it is undesired and empty. + BeginConfigChange(QueuingConfig) (QueueSetCompleter, error) + + // IsIdle returns a bool indicating whether the QueueSet was idle + // at the moment of the return. Idle means the QueueSet has zero + // requests queued and zero executing. This bit can change only + // (1) during a call to StartRequest and (2) during a call to + // Request::Finish. In the latter case idleness can only change + // from false to true. + IsIdle() bool + + // StartRequest begins the process of handling a request. If the + // request gets queued and the number of queues is greater than 1 + // then StartRequest uses the given hashValue as the source of + // entropy as it shuffle-shards the request into a queue. The + // descr1 and descr2 values play no role in the logic but appear + // in log messages. This method always returns quickly (without + // waiting for the request to be dequeued). If this method + // returns a nil Request value then caller should reject the + // request and the returned bool indicates whether the QueueSet + // was idle at the moment of the return. Otherwise idle==false + // and the client must call the Finish method of the Request + // exactly once. + StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn QueueNoteFn) (req Request, idle bool) + + // UpdateObservations makes sure any time-based statistics have + // caught up with the current clock reading + UpdateObservations() + + // Dump saves and returns the instant internal state of the queue-set. + // Note that dumping process will stop the queue-set from proceeding + // any requests. + // For debugging only. + Dump(includeRequestDetails bool) debug.QueueSetDump +} + +// QueueNoteFn is called when a request enters and leaves a queue +type QueueNoteFn func(inQueue bool) + +// Request represents the remainder of the handling of one request +type Request interface { + // Finish determines whether to execute or reject the request and + // invokes `execute` if the decision is to execute the request. + // The returned `idle bool` value indicates whether the QueueSet + // was idle when the value was calculated, but might no longer be + // accurate by the time the client examines that value. + Finish(execute func()) (idle bool) +} + +// QueuingConfig defines the configuration of the queuing aspect of a QueueSet. +type QueuingConfig struct { + // Name is used to identify a queue set, allowing for descriptive information about its intended use + Name string + + // DesiredNumQueues is the number of queues that the API says + // should exist now. 
This may be zero, in which case + // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored. + DesiredNumQueues int + + // QueueLengthLimit is the maximum number of requests that may be waiting in a given queue at a time + QueueLengthLimit int + + // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly + // dealing a "hand" of this many queues and then picking one of minimum length. + HandSize int + + // RequestWaitLimit is the maximum amount of time that a request may wait in a queue. + // If, by the end of that time, the request has not been dispatched then it is rejected. + RequestWaitLimit time.Duration +} + +// DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet. +type DispatchingConfig struct { + // ConcurrencyLimit is the maximum number of requests of this QueueSet that may be executing at a time + ConcurrencyLimit int +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go new file mode 100644 index 0000000000..1977f7522a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go @@ -0,0 +1,129 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package promise + +// This file defines interfaces for promises and futures and related +// things. These are about coordination among multiple goroutines and +// so are safe for concurrent calls --- although moderated in some +// cases by a requirement that the caller hold a certain lock. + +// Readable represents a variable that is initially not set and later +// becomes set. Some instances may be set to multiple values in +// series. A Readable for a variable that can only get one value is +// commonly known as a "future". +type Readable interface { + // Get reads the current value of this variable. If this variable + // is not set yet then this call blocks until this variable gets a + // value. + Get() interface{} + + // IsSet returns immediately with an indication of whether this + // variable has been set. + IsSet() bool +} + +// LockingReadable is a Readable whose implementation is protected by +// a lock +type LockingReadable interface { + Readable + + // GetLocked is like Get but the caller must already hold the + // lock. GetLocked may release, and later re-acquire, the lock + // any number of times. Get may acquire, and later release, the + // lock any number of times. + GetLocked() interface{} + + // IsSetLocked is like IsSet but the caller must already hold the + // lock. IsSetLocked may release, and later re-acquire, the lock + // any number of times. IsSet may acquire, and later release, the + // lock any number of times. + IsSetLocked() bool +} + +// WriteOnceOnly represents a variable that is initially not set and +// can be set once. 
+type WriteOnceOnly interface { + // Set normally writes a value into this variable, unblocks every + // goroutine waiting for this variable to have a value, and + // returns true. In the unhappy case that this variable is + // already set, this method returns false without modifying the + // variable's value. + Set(interface{}) bool +} + +// WriteOnce represents a variable that is initially not set and can +// be set once and is readable. This is the common meaning for +// "promise". +type WriteOnce interface { + Readable + WriteOnceOnly +} + +// LockingWriteOnceOnly is a WriteOnceOnly whose implementation is +// protected by a lock. +type LockingWriteOnceOnly interface { + WriteOnceOnly + + // SetLocked is like Set but the caller must already hold the + // lock. SetLocked may release, and later re-acquire, the lock + // any number of times. Set may acquire, and later release, the + // lock any number of times + SetLocked(interface{}) bool +} + +// LockingWriteOnce is a WriteOnce whose implementation is protected +// by a lock. +type LockingWriteOnce interface { + LockingReadable + LockingWriteOnceOnly +} + +// WriteMultipleOnly represents a variable that is initially not set +// and can be set one or more times (unlike a traditional "promise", +// which can be written only once). +type WriteMultipleOnly interface { + // Set writes a value into this variable and unblocks every + // goroutine waiting for this variable to have a value + Set(interface{}) +} + +// WriteMultiple represents a variable that is initially not set and +// can be set one or more times (unlike a traditional "promise", which +// can be written only once) and is readable. +type WriteMultiple interface { + Readable + WriteMultipleOnly +} + +// LockingWriteMultipleOnly is a WriteMultipleOnly whose +// implementation is protected by a lock. +type LockingWriteMultipleOnly interface { + WriteMultipleOnly + + // SetLocked is like Set but the caller must already hold the + // lock. SetLocked may release, and later re-acquire, the lock + // any number of times. Set may acquire, and later release, the + // lock any number of times + SetLocked(interface{}) +} + +// LockingWriteMultiple is a WriteMultiple whose implementation is +// protected by a lock. +type LockingWriteMultiple interface { + LockingReadable + LockingWriteMultipleOnly +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go new file mode 100644 index 0000000000..db5598f898 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go @@ -0,0 +1,124 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lockingpromise + +import ( + "sync" + + "k8s.io/apiserver/pkg/util/flowcontrol/counter" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise" +) + +// promisoid is the data and behavior common to all the promise-like +// abstractions implemented here. This implementation is based on a +// condition variable. This implementation tracks active goroutines: +// the given counter is decremented for a goroutine waiting for this +// varible to be set and incremented when such a goroutine is +// unblocked. +type promisoid struct { + lock sync.Locker + cond sync.Cond + activeCounter counter.GoRoutineCounter // counter of active goroutines + waitingCount int // number of goroutines idle due to this being unset + isSet bool + value interface{} +} + +func (pr *promisoid) Get() interface{} { + pr.lock.Lock() + defer pr.lock.Unlock() + return pr.GetLocked() +} + +func (pr *promisoid) GetLocked() interface{} { + if !pr.isSet { + pr.waitingCount++ + pr.activeCounter.Add(-1) + pr.cond.Wait() + } + return pr.value +} + +func (pr *promisoid) IsSet() bool { + pr.lock.Lock() + defer pr.lock.Unlock() + return pr.IsSetLocked() +} + +func (pr *promisoid) IsSetLocked() bool { + return pr.isSet +} + +func (pr *promisoid) SetLocked(value interface{}) { + pr.isSet = true + pr.value = value + if pr.waitingCount > 0 { + pr.activeCounter.Add(pr.waitingCount) + pr.waitingCount = 0 + pr.cond.Broadcast() + } +} + +type writeOnce struct { + promisoid +} + +var _ promise.LockingWriteOnce = &writeOnce{} + +// NewWriteOnce makes a new promise.LockingWriteOnce +func NewWriteOnce(lock sync.Locker, activeCounter counter.GoRoutineCounter) promise.LockingWriteOnce { + return &writeOnce{promisoid{ + lock: lock, + cond: *sync.NewCond(lock), + activeCounter: activeCounter, + }} +} + +func (wr *writeOnce) Set(value interface{}) bool { + wr.lock.Lock() + defer wr.lock.Unlock() + return wr.SetLocked(value) +} + +func (wr *writeOnce) SetLocked(value interface{}) bool { + if wr.isSet { + return false + } + wr.promisoid.SetLocked(value) + return true +} + +type writeMultiple struct { + promisoid +} + +var _ promise.LockingWriteMultiple = &writeMultiple{} + +// NewWriteMultiple makes a new promise.LockingWriteMultiple +func NewWriteMultiple(lock sync.Locker, activeCounter counter.GoRoutineCounter) promise.LockingWriteMultiple { + return &writeMultiple{promisoid{ + lock: lock, + cond: *sync.NewCond(lock), + activeCounter: activeCounter, + }} +} + +func (wr *writeMultiple) Set(value interface{}) { + wr.lock.Lock() + defer wr.lock.Unlock() + wr.SetLocked(value) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go new file mode 100644 index 0000000000..840d78ea18 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package queueset implements a technique called "fair queuing for +// server requests". One QueueSet is a set of queues operating +// according to this technique. +// +// Fair queuing for server requests is inspired by the fair queuing +// technique from the world of networking. You can find a good paper +// on that at https://dl.acm.org/citation.cfm?doid=75247.75248 or +// http://people.csail.mit.edu/imcgraw/links/research/pubs/networks/WFQ.pdf +// and there is an implementation outline in the Wikipedia article at +// https://en.wikipedia.org/wiki/Fair_queuing . +// +// Fair queuing for server requests differs from traditional fair +// queuing in three ways: (1) we are dispatching application layer +// requests to a server rather than transmitting packets on a network +// link, (2) multiple requests can be executing at once, and (3) the +// service time (execution duration) is not known until the execution +// completes. +// +// The first two differences can easily be handled by straightforward +// adaptation of the concept called "R(t)" in the original paper and +// "virtual time" in the implementation outline. In that +// implementation outline, the notation now() is used to mean reading +// the virtual clock. In the original paper’s terms, "R(t)" is the +// number of "rounds" that have been completed at real time t --- +// where a round consists of virtually transmitting one bit from every +// non-empty queue in the router (regardless of which queue holds the +// packet that is really being transmitted at the moment); in this +// conception, a packet is considered to be "in" its queue until the +// packet’s transmission is finished. For our problem, we can define a +// round to be giving one nanosecond of CPU to every non-empty queue +// in the apiserver (where emptiness is judged based on both queued +// and executing requests from that queue), and define R(t) = (server +// start time) + (1 ns) * (number of rounds since server start). Let +// us write NEQ(t) for that number of non-empty queues in the +// apiserver at time t. Let us also write C for the concurrency +// limit. In the original paper, the partial derivative of R(t) with +// respect to t is +// +// 1 / NEQ(t) . +// +// To generalize from transmitting one packet at a time to executing C +// requests at a time, that derivative becomes +// +// C / NEQ(t) . +// +// However, sometimes there are fewer than C requests available to +// execute. For a given queue "q", let us also write "reqs(q, t)" for +// the number of requests of that queue that are executing at that +// time. The total number of requests executing is sum[over q] +// reqs(q, t) and if that is less than C then virtual time is not +// advancing as fast as it would if all C seats were occupied; in this +// case the numerator of the quotient in that derivative should be +// adjusted proportionally. Putting it all together for fair queing +// for server requests: at a particular time t, the partial derivative +// of R(t) with respect to t is +// +// min( C, sum[over q] reqs(q, t) ) / NEQ(t) . +// +// In terms of the implementation outline, this is the rate at which +// virtual time is advancing at time t (in virtual nanoseconds per +// real nanosecond). Where the networking implementation outline adds +// packet size to a virtual time, in our version this corresponds to +// adding a service time (i.e., duration) to virtual time. 
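+//
+// A small worked example (illustrative only, not part of the original
+// design text): with a concurrency limit C = 10, three non-empty
+// queues (NEQ(t) = 3), and 2 + 1 + 1 = 4 requests from those queues
+// currently executing, virtual time advances at min(10, 4) / 3 = 4/3
+// virtual nanoseconds per real nanosecond; if all ten seats were
+// occupied it would advance at 10/3 instead.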
+// +// The third difference is handled by modifying the algorithm to +// dispatch based on an initial guess at the request’s service time +// (duration) and then make the corresponding adjustments once the +// request’s actual service time is known. This is similar, although +// not exactly isomorphic, to the original paper’s adjustment by +// `$\delta$` for the sake of promptness. +// +// For implementation simplicity (see below), let us use the same +// initial service time guess for every request; call that duration +// G. A good choice might be the service time limit (1 +// minute). Different guesses will give slightly different dynamics, +// but any positive number can be used for G without ruining the +// long-term behavior. +// +// As in ordinary fair queuing, there is a bound on divergence from +// the ideal. In plain fair queuing the bound is one packet; in our +// version it is C requests. +// +// To support efficiently making the necessary adjustments once a +// request’s actual service time is known, the virtual finish time of +// a request and the last virtual finish time of a queue are not +// represented directly but instead computed from queue length, +// request position in the queue, and an alternate state variable that +// holds the queue’s virtual start time. While the queue is empty and +// has no requests executing: the value of its virtual start time +// variable is ignored and its last virtual finish time is considered +// to be in the virtual past. When a request arrives to an empty queue +// with no requests executing, the queue’s virtual start time is set +// to the current virtual time. The virtual finish time of request +// number J in the queue (counting from J=1 for the head) is J * G + +// (queue's virtual start time). While the queue is non-empty: the +// last virtual finish time of the queue is the virtual finish time of +// the last request in the queue. While the queue is empty and has a +// request executing: the last virtual finish time is the queue’s +// virtual start time. When a request is dequeued for service the +// queue’s virtual start time is advanced by G. When a request +// finishes being served, and the actual service time was S, the +// queue’s virtual start time is decremented by G - S. +// +package queueset diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go new file mode 100644 index 0000000000..b61d8ce7d0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -0,0 +1,781 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queueset + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/pkg/errors" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/util/flowcontrol/counter" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + "k8s.io/apiserver/pkg/util/shufflesharding" + "k8s.io/klog/v2" +) + +const nsTimeFmt = "2006-01-02 15:04:05.000000000" + +// queueSetFactory implements the QueueSetFactory interface +// queueSetFactory makes QueueSet objects. +type queueSetFactory struct { + counter counter.GoRoutineCounter + clock clock.PassiveClock +} + +// `*queueSetCompleter` implements QueueSetCompleter. Exactly one of +// the fields `factory` and `theSet` is non-nil. +type queueSetCompleter struct { + factory *queueSetFactory + obsPair metrics.TimedObserverPair + theSet *queueSet + qCfg fq.QueuingConfig + dealer *shufflesharding.Dealer +} + +// queueSet implements the Fair Queuing for Server Requests technique +// described in this package's doc, and a pointer to one implements +// the QueueSet interface. The clock, GoRoutineCounter, and estimated +// service time should not be changed; the fields listed after the +// lock must be accessed only while holding the lock. The methods of +// this type follow the naming convention that the suffix "Locked" +// means the caller must hold the lock; for a method whose name does +// not end in "Locked" either acquires the lock or does not care about +// locking. +type queueSet struct { + clock clock.PassiveClock + counter counter.GoRoutineCounter + estimatedServiceTime float64 + obsPair metrics.TimedObserverPair + + lock sync.Mutex + + // qCfg holds the current queuing configuration. Its + // DesiredNumQueues may be less than the current number of queues. + // If its DesiredNumQueues is zero then its other queuing + // parameters retain the settings they had when DesiredNumQueues + // was last non-zero (if ever). + qCfg fq.QueuingConfig + + // the current dispatching configuration. + dCfg fq.DispatchingConfig + + // If `config.DesiredNumQueues` is non-zero then dealer is not nil + // and is good for `config`. + dealer *shufflesharding.Dealer + + // queues may be longer than the desired number, while the excess + // queues are still draining. + queues []*queue + + // virtualTime is the number of virtual seconds since process startup + virtualTime float64 + + // lastRealTime is what `clock.Now()` yielded when `virtualTime` was last updated + lastRealTime time.Time + + // robinIndex is the index of the last queue dispatched + robinIndex int + + // totRequestsWaiting is the sum, over all the queues, of the + // number of requests waiting in that queue + totRequestsWaiting int + + // totRequestsExecuting is the total number of requests of this + // queueSet that are currently executing. That is the same as the + // sum, over all the queues, of the number of requests executing + // from that queue. 
+ totRequestsExecuting int +} + +// NewQueueSetFactory creates a new QueueSetFactory object +func NewQueueSetFactory(c clock.PassiveClock, counter counter.GoRoutineCounter) fq.QueueSetFactory { + return &queueSetFactory{ + counter: counter, + clock: c, + } +} + +func (qsf *queueSetFactory) BeginConstruction(qCfg fq.QueuingConfig, obsPair metrics.TimedObserverPair) (fq.QueueSetCompleter, error) { + dealer, err := checkConfig(qCfg) + if err != nil { + return nil, err + } + return &queueSetCompleter{ + factory: qsf, + obsPair: obsPair, + qCfg: qCfg, + dealer: dealer}, nil +} + +// checkConfig returns a non-nil Dealer if the config is valid and +// calls for one, and returns a non-nil error if the given config is +// invalid. +func checkConfig(qCfg fq.QueuingConfig) (*shufflesharding.Dealer, error) { + if qCfg.DesiredNumQueues == 0 { + return nil, nil + } + dealer, err := shufflesharding.NewDealer(qCfg.DesiredNumQueues, qCfg.HandSize) + if err != nil { + err = errors.Wrap(err, "the QueueSetConfig implies an invalid shuffle sharding config (DesiredNumQueues is deckSize)") + } + return dealer, err +} + +func (qsc *queueSetCompleter) Complete(dCfg fq.DispatchingConfig) fq.QueueSet { + qs := qsc.theSet + if qs == nil { + qs = &queueSet{ + clock: qsc.factory.clock, + counter: qsc.factory.counter, + estimatedServiceTime: 60, + obsPair: qsc.obsPair, + qCfg: qsc.qCfg, + virtualTime: 0, + lastRealTime: qsc.factory.clock.Now(), + } + } + qs.setConfiguration(qsc.qCfg, qsc.dealer, dCfg) + return qs +} + +// createQueues is a helper method for initializing an array of n queues +func createQueues(n, baseIndex int) []*queue { + fqqueues := make([]*queue, n) + for i := 0; i < n; i++ { + fqqueues[i] = &queue{index: baseIndex + i, requests: make([]*request, 0)} + } + return fqqueues +} + +func (qs *queueSet) BeginConfigChange(qCfg fq.QueuingConfig) (fq.QueueSetCompleter, error) { + dealer, err := checkConfig(qCfg) + if err != nil { + return nil, err + } + return &queueSetCompleter{ + theSet: qs, + qCfg: qCfg, + dealer: dealer}, nil +} + +// SetConfiguration is used to set the configuration for a queueSet. +// Update handling for when fields are updated is handled here as well - +// eg: if DesiredNum is increased, SetConfiguration reconciles by +// adding more queues. +func (qs *queueSet) setConfiguration(qCfg fq.QueuingConfig, dealer *shufflesharding.Dealer, dCfg fq.DispatchingConfig) { + qs.lockAndSyncTime() + defer qs.lock.Unlock() + + if qCfg.DesiredNumQueues > 0 { + // Adding queues is the only thing that requires immediate action + // Removing queues is handled by omitting indexes >DesiredNum from + // chooseQueueIndexLocked + numQueues := len(qs.queues) + if qCfg.DesiredNumQueues > numQueues { + qs.queues = append(qs.queues, + createQueues(qCfg.DesiredNumQueues-numQueues, len(qs.queues))...) + } + } else { + qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit + qCfg.HandSize = qs.qCfg.HandSize + qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit + } + + qs.qCfg = qCfg + qs.dCfg = dCfg + qs.dealer = dealer + qll := qCfg.QueueLengthLimit + if qll < 1 { + qll = 1 + } + qs.obsPair.RequestsWaiting.SetX1(float64(qll)) + qs.obsPair.RequestsExecuting.SetX1(float64(dCfg.ConcurrencyLimit)) + + qs.dispatchAsMuchAsPossibleLocked() +} + +// A decision about a request +type requestDecision int + +// Values passed through a request's decision +const ( + decisionExecute requestDecision = iota + decisionReject + decisionCancel +) + +// StartRequest begins the process of handling a request. 
We take the +// approach of updating the metrics about total requests queued and +// executing at each point where there is a change in that quantity, +// because the metrics --- and only the metrics --- track that +// quantity per FlowSchema. +func (qs *queueSet) StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) (fq.Request, bool) { + qs.lockAndSyncTime() + defer qs.lock.Unlock() + var req *request + + // ======================================================================== + // Step 0: + // Apply only concurrency limit, if zero queues desired + if qs.qCfg.DesiredNumQueues < 1 { + if qs.totRequestsExecuting >= qs.dCfg.ConcurrencyLimit { + klog.V(5).Infof("QS(%s): rejecting request %q %#+v %#+v because %d are executing and the limit is %d", qs.qCfg.Name, fsName, descr1, descr2, qs.totRequestsExecuting, qs.dCfg.ConcurrencyLimit) + metrics.AddReject(qs.qCfg.Name, fsName, "concurrency-limit") + return nil, qs.isIdleLocked() + } + req = qs.dispatchSansQueueLocked(ctx, flowDistinguisher, fsName, descr1, descr2) + return req, false + } + + // ======================================================================== + // Step 1: + // 1) Start with shuffle sharding, to pick a queue. + // 2) Reject old requests that have been waiting too long + // 3) Reject current request if there is not enough concurrency shares and + // we are at max queue length + // 4) If not rejected, create a request and enqueue + req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) + // req == nil means that the request was rejected - no remaining + // concurrency shares and at max queue length already + if req == nil { + klog.V(5).Infof("QS(%s): rejecting request %q %#+v %#+v due to queue full", qs.qCfg.Name, fsName, descr1, descr2) + metrics.AddReject(qs.qCfg.Name, fsName, "queue-full") + return nil, qs.isIdleLocked() + } + + // ======================================================================== + // Step 2: + // The next step is to invoke the method that dequeues as much + // as possible. + // This method runs a loop, as long as there are non-empty + // queues and the number currently executing is less than the + // assured concurrency value. The body of the loop uses the + // fair queuing technique to pick a queue and dispatch a + // request from that queue. + qs.dispatchAsMuchAsPossibleLocked() + + // ======================================================================== + // Step 3: + + // Set up a relay from the context's Done channel to the world + // of well-counted goroutines. We Are Told that every + // request's context's Done channel gets closed by the time + // the request is done being processed. + doneCh := ctx.Done() + if doneCh != nil { + qs.preCreateOrUnblockGoroutine() + go func() { + defer runtime.HandleCrash() + qs.goroutineDoneOrBlocked() + _ = <-doneCh + // Whatever goroutine unblocked the preceding receive MUST + // have already either (a) incremented qs.counter or (b) + // known that said counter is not actually counting or (c) + // known that the count does not need to be accurate. + // BTW, the count only needs to be accurate in a test that + // uses FakeEventClock::Run(). 
+ klog.V(6).Infof("QS(%s): Context of request %q %#+v %#+v is Done", qs.qCfg.Name, fsName, descr1, descr2) + qs.cancelWait(req) + qs.goroutineDoneOrBlocked() + }() + } + return req, false +} + +func (req *request) NoteQueued(inQueue bool) { + if req.queueNoteFn != nil { + req.queueNoteFn(inQueue) + } +} + +func (req *request) Finish(execFn func()) bool { + exec, idle := req.wait() + if !exec { + return idle + } + execFn() + return req.qs.finishRequestAndDispatchAsMuchAsPossible(req) +} + +func (req *request) wait() (bool, bool) { + qs := req.qs + qs.lock.Lock() + defer qs.lock.Unlock() + if req.waitStarted { + // This can not happen, because the client is forbidden to + // call Wait twice on the same request + panic(fmt.Sprintf("Multiple calls to the Wait method, QueueSet=%s, startTime=%s, descr1=%#+v, descr2=%#+v", req.qs.qCfg.Name, req.startTime, req.descr1, req.descr2)) + } + req.waitStarted = true + + // ======================================================================== + // Step 4: + // The final step is to wait on a decision from + // somewhere and then act on it. + decisionAny := req.decision.GetLocked() + qs.syncTimeLocked() + decision, isDecision := decisionAny.(requestDecision) + if !isDecision { + panic(fmt.Sprintf("QS(%s): Impossible decision %#+v (of type %T) for request %#+v %#+v", qs.qCfg.Name, decisionAny, decisionAny, req.descr1, req.descr2)) + } + switch decision { + case decisionReject: + klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2) + metrics.AddReject(qs.qCfg.Name, req.fsName, "time-out") + return false, qs.isIdleLocked() + case decisionCancel: + // TODO(aaron-prindle) add metrics for this case + klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) + return false, qs.isIdleLocked() + case decisionExecute: + klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) + return true, false + default: + // This can not happen, all possible values are handled above + panic(decision) + } +} + +func (qs *queueSet) IsIdle() bool { + qs.lock.Lock() + defer qs.lock.Unlock() + return qs.isIdleLocked() +} + +func (qs *queueSet) isIdleLocked() bool { + return qs.totRequestsWaiting == 0 && qs.totRequestsExecuting == 0 +} + +// lockAndSyncTime acquires the lock and updates the virtual time. +// Doing them together avoids the mistake of modify some queue state +// before calling syncTimeLocked. +func (qs *queueSet) lockAndSyncTime() { + qs.lock.Lock() + qs.syncTimeLocked() +} + +// syncTimeLocked updates the virtual time based on the assumption +// that the current state of the queues has been in effect since +// `qs.lastRealTime`. Thus, it should be invoked after acquiring the +// lock and before modifying the state of any queue. +func (qs *queueSet) syncTimeLocked() { + realNow := qs.clock.Now() + timeSinceLast := realNow.Sub(qs.lastRealTime).Seconds() + qs.lastRealTime = realNow + qs.virtualTime += timeSinceLast * qs.getVirtualTimeRatioLocked() +} + +// getVirtualTimeRatio calculates the rate at which virtual time has +// been advancing, according to the logic in `doc.go`. 
+func (qs *queueSet) getVirtualTimeRatioLocked() float64 { + activeQueues := 0 + reqs := 0 + for _, queue := range qs.queues { + reqs += queue.requestsExecuting + if len(queue.requests) > 0 || queue.requestsExecuting > 0 { + activeQueues++ + } + } + if activeQueues == 0 { + return 0 + } + return math.Min(float64(reqs), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues) +} + +// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required +// to validate and enqueue a request for the queueSet/QueueSet: +// 1) Start with shuffle sharding, to pick a queue. +// 2) Reject old requests that have been waiting too long +// 3) Reject current request if there are not enough concurrency shares and +// we are at max queue length +// 4) If not rejected, create a request and enqueue +// returns the enqueued request on a successful enqueue +// returns nil in the case that there is no available concurrency or +// the queue length limit has been reached +func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { + // Start with the shuffle sharding, to pick a queue. + queueIdx := qs.chooseQueueIndexLocked(hashValue, descr1, descr2) + queue := qs.queues[queueIdx] + // The next step is the logic to reject requests that have been waiting too long + qs.removeTimedOutRequestsFromQueueLocked(queue, fsName) + // NOTE: currently timeout is only checked for each new request. This means that there can be + // requests that are in the queue longer than the timeout if there are no new requests. + // We prefer simplicity over promptness, at least for now. + + // Create a request and enqueue + req := &request{ + qs: qs, + fsName: fsName, + flowDistinguisher: flowDistinguisher, + ctx: ctx, + decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), + arrivalTime: qs.clock.Now(), + queue: queue, + descr1: descr1, + descr2: descr2, + queueNoteFn: queueNoteFn, + } + if ok := qs.rejectOrEnqueueLocked(req); !ok { + return nil + } + metrics.ObserveQueueLength(qs.qCfg.Name, fsName, len(queue.requests)) + return req +} + +// chooseQueueIndexLocked uses shuffle sharding to select a queue index +// using the given hashValue and the shuffle sharding parameters of the queueSet. +func (qs *queueSet) chooseQueueIndexLocked(hashValue uint64, descr1, descr2 interface{}) int { + bestQueueIdx := -1 + bestQueueLen := int(math.MaxInt32) + // the dealer uses the current desired number of queues, which is no larger than the number in `qs.queues`.
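+ // The dealer calls the function below once for each queue index in the dealt hand; the callback keeps the shortest of those queues, with ties going to the queue dealt first.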
+ qs.dealer.Deal(hashValue, func(queueIdx int) { + thisLen := len(qs.queues[queueIdx].requests) + klog.V(7).Infof("QS(%s): For request %#+v %#+v considering queue %d of length %d", qs.qCfg.Name, descr1, descr2, queueIdx, thisLen) + if thisLen < bestQueueLen { + bestQueueIdx, bestQueueLen = queueIdx, thisLen + } + }) + klog.V(6).Infof("QS(%s) at r=%s v=%.9fs: For request %#+v %#+v chose queue %d, had %d waiting & %d executing", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.virtualTime, descr1, descr2, bestQueueIdx, bestQueueLen, qs.queues[bestQueueIdx].requestsExecuting) + return bestQueueIdx +} + +// removeTimedOutRequestsFromQueueLocked rejects old requests that have been enqueued +// past the requestWaitLimit +func (qs *queueSet) removeTimedOutRequestsFromQueueLocked(queue *queue, fsName string) { + timeoutIdx := -1 + now := qs.clock.Now() + reqs := queue.requests + // reqs are sorted oldest -> newest + // can short circuit loop (break) if oldest requests are not timing out + // as newer requests also will not have timed out + + // now - requestWaitLimit = waitLimit + waitLimit := now.Add(-qs.qCfg.RequestWaitLimit) + for i, req := range reqs { + if waitLimit.After(req.arrivalTime) { + req.decision.SetLocked(decisionReject) + // get index for timed out requests + timeoutIdx = i + metrics.AddRequestsInQueues(qs.qCfg.Name, req.fsName, -1) + req.NoteQueued(false) + } else { + break + } + } + // remove timed out requests from queue + if timeoutIdx != -1 { + // timeoutIdx + 1 to remove the last timeout req + removeIdx := timeoutIdx + 1 + // remove all the timeout requests + queue.requests = reqs[removeIdx:] + // decrement the # of requestsEnqueued + qs.totRequestsWaiting -= removeIdx + qs.obsPair.RequestsWaiting.Add(float64(-removeIdx)) + } +} + +// rejectOrEnqueueLocked rejects or enqueues the newly arrived +// request, which has been assigned to a queue. If up against the +// queue length limit and the concurrency limit then returns false. +// Otherwise enqueues and returns true. +func (qs *queueSet) rejectOrEnqueueLocked(request *request) bool { + queue := request.queue + curQueueLength := len(queue.requests) + // rejects the newly arrived request if resource criteria not met + if qs.totRequestsExecuting >= qs.dCfg.ConcurrencyLimit && + curQueueLength >= qs.qCfg.QueueLengthLimit { + return false + } + + qs.enqueueLocked(request) + return true +} + +// enqueues a request into its queue. +func (qs *queueSet) enqueueLocked(request *request) { + queue := request.queue + now := qs.clock.Now() + if len(queue.requests) == 0 && queue.requestsExecuting == 0 { + // the queue’s virtual start time is set to the virtual time. + queue.virtualStart = qs.virtualTime + if klog.V(6).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: initialized queue %d virtual start time due to request %#+v %#+v", qs.qCfg.Name, now.Format(nsTimeFmt), queue.virtualStart, queue.index, request.descr1, request.descr2) + } + } + queue.Enqueue(request) + qs.totRequestsWaiting++ + metrics.AddRequestsInQueues(qs.qCfg.Name, request.fsName, 1) + request.NoteQueued(true) + qs.obsPair.RequestsWaiting.Add(1) +} + +// dispatchAsMuchAsPossibleLocked runs a loop, as long as there +// are non-empty queues and the number currently executing is less than the +// assured concurrency value. The body of the loop uses the fair queuing +// technique to pick a queue, dequeue the request at the head of that +// queue, increment the count of the number executing, and send true +// to the request's channel. 
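+// The loop below stops as soon as dispatchLocked reports that no queue has a
+// request waiting to be dispatched.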
+func (qs *queueSet) dispatchAsMuchAsPossibleLocked() { + for qs.totRequestsWaiting != 0 && qs.totRequestsExecuting < qs.dCfg.ConcurrencyLimit { + ok := qs.dispatchLocked() + if !ok { + break + } + } +} + +func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, flowDistinguisher, fsName string, descr1, descr2 interface{}) *request { + now := qs.clock.Now() + req := &request{ + qs: qs, + fsName: fsName, + flowDistinguisher: flowDistinguisher, + ctx: ctx, + startTime: now, + decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), + arrivalTime: now, + descr1: descr1, + descr2: descr2, + } + req.decision.SetLocked(decisionExecute) + qs.totRequestsExecuting++ + metrics.AddRequestsExecuting(qs.qCfg.Name, fsName, 1) + qs.obsPair.RequestsExecuting.Add(1) + if klog.V(5).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: immediate dispatch of request %q %#+v %#+v, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, fsName, descr1, descr2, qs.totRequestsExecuting) + } + return req +} + +// dispatchLocked uses the Fair Queuing for Server Requests method to +// select a queue and dispatch the oldest request in that queue. The +// return value indicates whether a request was dispatched; this will +// be false when there are no requests waiting in any queue. +func (qs *queueSet) dispatchLocked() bool { + queue := qs.selectQueueLocked() + if queue == nil { + return false + } + request, ok := queue.Dequeue() + if !ok { // This should never happen. But if it does... + return false + } + request.startTime = qs.clock.Now() + // At this moment the request leaves its queue and starts + // executing. We do not recognize any interim state between + // "queued" and "executing". While that means "executing" + // includes a little overhead from this package, this is not a + // problem because other overhead is also included. + qs.totRequestsWaiting-- + qs.totRequestsExecuting++ + queue.requestsExecuting++ + metrics.AddRequestsInQueues(qs.qCfg.Name, request.fsName, -1) + request.NoteQueued(false) + metrics.AddRequestsExecuting(qs.qCfg.Name, request.fsName, 1) + qs.obsPair.RequestsWaiting.Add(-1) + qs.obsPair.RequestsExecuting.Add(1) + if klog.V(6).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: dispatching request %#+v %#+v from queue %d with virtual start time %.9fs, queue will have %d waiting & %d executing", qs.qCfg.Name, request.startTime.Format(nsTimeFmt), qs.virtualTime, request.descr1, request.descr2, queue.index, queue.virtualStart, len(queue.requests), queue.requestsExecuting) + } + // When a request is dequeued for service -> qs.virtualStart += G + queue.virtualStart += qs.estimatedServiceTime + request.decision.SetLocked(decisionExecute) + return ok +} + +// cancelWait ensures the request is not waiting. This is only +// applicable to a request that has been assigned to a queue. +func (qs *queueSet) cancelWait(req *request) { + qs.lock.Lock() + defer qs.lock.Unlock() + if req.decision.IsSetLocked() { + // The request has already been removed from the queue + // and so we consider its wait to be over. + return + } + req.decision.SetLocked(decisionCancel) + queue := req.queue + // remove the request from the queue as it has timed out + for i := range queue.requests { + if req == queue.requests[i] { + // remove the request + queue.requests = append(queue.requests[:i], queue.requests[i+1:]...) 
+ qs.totRequestsWaiting-- + metrics.AddRequestsInQueues(qs.qCfg.Name, req.fsName, -1) + req.NoteQueued(false) + qs.obsPair.RequestsWaiting.Add(-1) + break + } + } + return +} + +// selectQueueLocked examines the queues in round robin order and +// returns the first one of those for which the virtual finish time of +// the oldest waiting request is minimal. +func (qs *queueSet) selectQueueLocked() *queue { + minVirtualFinish := math.Inf(1) + var minQueue *queue + var minIndex int + nq := len(qs.queues) + for range qs.queues { + qs.robinIndex = (qs.robinIndex + 1) % nq + queue := qs.queues[qs.robinIndex] + if len(queue.requests) != 0 { + + currentVirtualFinish := queue.GetVirtualFinish(0, qs.estimatedServiceTime) + if currentVirtualFinish < minVirtualFinish { + minVirtualFinish = currentVirtualFinish + minQueue = queue + minIndex = qs.robinIndex + } + } + } + // we set the round robin indexing to start at the chose queue + // for the next round. This way the non-selected queues + // win in the case that the virtual finish times are the same + qs.robinIndex = minIndex + // according to the original FQ formula: + // + // Si = MAX(R(t), Fi-1) + // + // the virtual start (excluding the estimated cost) of the chose + // queue should always be greater or equal to the global virtual + // time. + // + // hence we're refreshing the per-queue virtual time for the chosen + // queue here. if the last virtual start time (excluded estimated cost) + // falls behind the global virtual time, we update the latest virtual + // start by: + + previouslyEstimatedServiceTime := float64(minQueue.requestsExecuting) * qs.estimatedServiceTime + if qs.virtualTime > minQueue.virtualStart-previouslyEstimatedServiceTime { + // per-queue virtual time should not fall behind the global + minQueue.virtualStart = qs.virtualTime + previouslyEstimatedServiceTime + } + return minQueue +} + +// finishRequestAndDispatchAsMuchAsPossible is a convenience method +// which calls finishRequest for a given request and then dispatches +// as many requests as possible. This is all of what needs to be done +// once a request finishes execution or is canceled. This returns a bool +// indicating whether the QueueSet is now idle. +func (qs *queueSet) finishRequestAndDispatchAsMuchAsPossible(req *request) bool { + qs.lockAndSyncTime() + defer qs.lock.Unlock() + + qs.finishRequestLocked(req) + qs.dispatchAsMuchAsPossibleLocked() + return qs.isIdleLocked() +} + +// finishRequestLocked is a callback that should be used when a +// previously dispatched request has completed it's service. This +// callback updates important state in the queueSet +func (qs *queueSet) finishRequestLocked(r *request) { + now := qs.clock.Now() + qs.totRequestsExecuting-- + metrics.AddRequestsExecuting(qs.qCfg.Name, r.fsName, -1) + qs.obsPair.RequestsExecuting.Add(-1) + + if r.queue == nil { + if klog.V(6).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: request %#+v %#+v finished, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, r.descr1, r.descr2, qs.totRequestsExecuting) + } + return + } + + S := now.Sub(r.startTime).Seconds() + + // When a request finishes being served, and the actual service time was S, + // the queue’s virtual start time is decremented by G - S. 
+ r.queue.virtualStart -= qs.estimatedServiceTime - S + + // request has finished, remove from requests executing + r.queue.requestsExecuting-- + + if klog.V(6).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: request %#+v %#+v finished, adjusted queue %d virtual start time to %.9fs due to service time %.9fs, queue will have %d waiting & %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, r.descr1, r.descr2, r.queue.index, r.queue.virtualStart, S, len(r.queue.requests), r.queue.requestsExecuting) + } + + // If there are more queues than desired and this one has no + // requests then remove it + if len(qs.queues) > qs.qCfg.DesiredNumQueues && + len(r.queue.requests) == 0 && + r.queue.requestsExecuting == 0 { + qs.queues = removeQueueAndUpdateIndexes(qs.queues, r.queue.index) + + // decrement here to maintain the invariant that (qs.robinIndex+1) % numQueues + // is the index of the next queue after the one last dispatched from + if qs.robinIndex >= r.queue.index { + qs.robinIndex-- + } + } +} + +// removeQueueAndUpdateIndexes uses reslicing to remove an index from a slice +// and then updates the 'index' field of the queues to be correct +func removeQueueAndUpdateIndexes(queues []*queue, index int) []*queue { + keptQueues := append(queues[:index], queues[index+1:]...) + for i := index; i < len(keptQueues); i++ { + keptQueues[i].index-- + } + return keptQueues +} + +// preCreateOrUnblockGoroutine needs to be called before creating a +// goroutine associated with this queueSet or unblocking a blocked +// one, to properly update the accounting used in testing. +func (qs *queueSet) preCreateOrUnblockGoroutine() { + qs.counter.Add(1) +} + +// goroutineDoneOrBlocked needs to be called at the end of every +// goroutine associated with this queueSet or when such a goroutine is +// about to wait on some other goroutine to do something; this is to +// properly update the accounting used in testing. +func (qs *queueSet) goroutineDoneOrBlocked() { + qs.counter.Add(-1) +} + +func (qs *queueSet) UpdateObservations() { + qs.obsPair.RequestsWaiting.Add(0) + qs.obsPair.RequestsExecuting.Add(0) +} + +func (qs *queueSet) Dump(includeRequestDetails bool) debug.QueueSetDump { + qs.lock.Lock() + defer qs.lock.Unlock() + d := debug.QueueSetDump{ + Queues: make([]debug.QueueDump, len(qs.queues)), + Waiting: qs.totRequestsWaiting, + Executing: qs.totRequestsExecuting, + } + for i, q := range qs.queues { + d.Queues[i] = q.dump(includeRequestDetails) + } + return d +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go new file mode 100644 index 0000000000..a720230600 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queueset + +import ( + "context" + "time" + + genericrequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise" +) + +// request is a temporary container for "requests" with additional +// tracking fields required for the functionality FQScheduler +type request struct { + ctx context.Context + + qs *queueSet + + flowDistinguisher string + fsName string + + // The relevant queue. Is nil if this request did not go through + // a queue. + queue *queue + + // startTime is the real time when the request began executing + startTime time.Time + + // decision gets set to a `requestDecision` indicating what to do + // with this request. It gets set exactly once, when the request + // is removed from its queue. The value will be decisionReject, + // decisionCancel, or decisionExecute; decisionTryAnother never + // appears here. + decision promise.LockingWriteOnce + + // arrivalTime is the real time when the request entered this system + arrivalTime time.Time + + // descr1 and descr2 are not used in any logic but they appear in + // log messages + descr1, descr2 interface{} + + // Indicates whether client has called Request::Wait() + waitStarted bool + + queueNoteFn fq.QueueNoteFn +} + +// queue is an array of requests with additional metadata required for +// the FQScheduler +type queue struct { + requests []*request + + // virtualStart is the virtual time (virtual seconds since process + // startup) when the oldest request in the queue (if there is any) + // started virtually executing + virtualStart float64 + + requestsExecuting int + index int +} + +// Enqueue enqueues a request into the queue +func (q *queue) Enqueue(request *request) { + q.requests = append(q.requests, request) +} + +// Dequeue dequeues a request from the queue +func (q *queue) Dequeue() (*request, bool) { + if len(q.requests) == 0 { + return nil, false + } + request := q.requests[0] + q.requests = q.requests[1:] + return request, true +} + +// GetVirtualFinish returns the expected virtual finish time of the request at +// index J in the queue with estimated finish time G +func (q *queue) GetVirtualFinish(J int, G float64) float64 { + // The virtual finish time of request number J in the queue + // (counting from J=1 for the head) is J * G + (virtual start time). + + // counting from J=1 for the head (eg: queue.requests[0] -> J=1) - J+1 + jg := float64(J+1) * float64(G) + return jg + q.virtualStart +} + +func (q *queue) dump(includeDetails bool) debug.QueueDump { + digest := make([]debug.RequestDump, len(q.requests)) + for i, r := range q.requests { + // dump requests. 
+ digest[i].MatchedFlowSchema = r.fsName + digest[i].FlowDistinguisher = r.flowDistinguisher + digest[i].ArriveTime = r.arrivalTime + digest[i].StartTime = r.startTime + if includeDetails { + userInfo, _ := genericrequest.UserFrom(r.ctx) + digest[i].UserName = userInfo.GetName() + requestInfo, ok := genericrequest.RequestInfoFrom(r.ctx) + if ok { + digest[i].RequestInfo = *requestInfo + } + } + } + return debug.QueueDump{ + VirtualStart: q.virtualStart, + Requests: digest, + ExecutingRequests: q.requestsExecuting, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go new file mode 100644 index 0000000000..61ae65df96 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go @@ -0,0 +1,231 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package format + +import ( + "bytes" + "encoding/json" + "fmt" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// This file provides an easy way to mark a value for formatting to +// `%s` in full detail IF it is printed but without costing a lot of +// CPU or memory if the value is NOT printed. The API Priority and +// Fairness API objects are formatted into JSON. The other types of +// objects here are formatted into golang source. + +// Stringer marks the given value for custom formatting by this package. +type Stringer struct{ val interface{} } + +// Fmt marks the given value for custom formatting by this package. 
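+// A typical use is cheap deferred formatting in log calls, for example
+// klog.V(5).Infof("flow schema: %s", format.Fmt(fs)); the conversion to JSON or
+// Go syntax happens inside String, i.e. only if the %s verb is actually evaluated.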
+func Fmt(val interface{}) Stringer { + return Stringer{val} +} + +// String formats to a string in full detail +func (sr Stringer) String() string { + if sr.val == nil { + return "nil" + } + switch typed := sr.val.(type) { + case *flowcontrol.FlowSchema, + flowcontrol.FlowSchema, + flowcontrol.FlowSchemaSpec, + flowcontrol.FlowDistinguisherMethod, + *flowcontrol.FlowDistinguisherMethod, + *flowcontrol.PolicyRulesWithSubjects, + flowcontrol.PolicyRulesWithSubjects, + flowcontrol.Subject, + flowcontrol.ResourcePolicyRule, + flowcontrol.NonResourcePolicyRule, + flowcontrol.FlowSchemaCondition, + *flowcontrol.PriorityLevelConfiguration, + flowcontrol.PriorityLevelConfiguration, + flowcontrol.PriorityLevelConfigurationSpec, + *flowcontrol.LimitedPriorityLevelConfiguration, + flowcontrol.LimitedPriorityLevelConfiguration, + flowcontrol.LimitResponse, + *flowcontrol.QueuingConfiguration, + flowcontrol.QueuingConfiguration: + return ToJSON(sr.val) + case []user.Info: + return FmtUsers(typed) + case []*request.RequestInfo: + return FmtRequests(typed) + default: + return fmt.Sprintf("%#+v", sr.val) + } +} + +// ToJSON converts using encoding/json and handles errors by +// formatting them +func ToJSON(val interface{}) string { + bs, err := json.Marshal(val) + str := string(bs) + if err != nil { + str = str + "<" + err.Error() + ">" + } + return str +} + +// FmtPriorityLevelConfiguration returns a golang source expression +// equivalent to the given value +func FmtPriorityLevelConfiguration(pl *flowcontrol.PriorityLevelConfiguration) string { + if pl == nil { + return "nil" + } + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("&flowcontrolv1beta1.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ", + pl.ObjectMeta)) + BufferPriorityLevelConfigurationSpec(&buf, &pl.Spec) + buf.WriteString(fmt.Sprintf(", Status: %#+v}", pl.Status)) + return buf.String() +} + +// FmtPriorityLevelConfigurationSpec returns a golang source +// expression equivalent to the given value +func FmtPriorityLevelConfigurationSpec(plSpec *flowcontrol.PriorityLevelConfigurationSpec) string { + var buf bytes.Buffer + BufferPriorityLevelConfigurationSpec(&buf, plSpec) + return buf.String() +} + +// BufferPriorityLevelConfigurationSpec writes a golang source +// expression for the given value to the given buffer +func BufferPriorityLevelConfigurationSpec(buf *bytes.Buffer, plSpec *flowcontrol.PriorityLevelConfigurationSpec) { + buf.WriteString(fmt.Sprintf("flowcontrolv1beta1.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type)) + if plSpec.Limited != nil { + buf.WriteString(fmt.Sprintf(", Limited: &flowcontrol.LimitedPriorityLevelConfiguration{AssuredConcurrencyShares:%d, LimitResponse:flowcontrol.LimitResponse{Type:%#v", plSpec.Limited.AssuredConcurrencyShares, plSpec.Limited.LimitResponse.Type)) + if plSpec.Limited.LimitResponse.Queuing != nil { + buf.WriteString(fmt.Sprintf(", Queuing:&%#+v", *plSpec.Limited.LimitResponse.Queuing)) + } + buf.WriteString(" } }") + } + buf.WriteString("}") +} + +// FmtFlowSchema produces a golang source expression of the value. 
+func FmtFlowSchema(fs *flowcontrol.FlowSchema) string { + if fs == nil { + return "nil" + } + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("&flowcontrolv1beta1.FlowSchema{ObjectMeta: %#+v, Spec: ", + fs.ObjectMeta)) + BufferFlowSchemaSpec(&buf, &fs.Spec) + buf.WriteString(fmt.Sprintf(", Status: %#+v}", fs.Status)) + return buf.String() +} + +// FmtFlowSchemaSpec produces a golang source expression equivalent to +// the given spec +func FmtFlowSchemaSpec(fsSpec *flowcontrol.FlowSchemaSpec) string { + var buf bytes.Buffer + BufferFlowSchemaSpec(&buf, fsSpec) + return buf.String() +} + +// BufferFlowSchemaSpec writes a golang source expression for the +// given value to the given buffer +func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) { + buf.WriteString(fmt.Sprintf("flowcontrolv1beta1.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ", + fsSpec.PriorityLevelConfiguration, + fsSpec.MatchingPrecedence)) + if fsSpec.DistinguisherMethod == nil { + buf.WriteString("nil") + } else { + buf.WriteString(fmt.Sprintf("&%#+v", *fsSpec.DistinguisherMethod)) + } + buf.WriteString(", Rules: []flowcontrol.PolicyRulesWithSubjects{") + for idx, rule := range fsSpec.Rules { + if idx > 0 { + buf.WriteString(", ") + } + BufferFmtPolicyRulesWithSubjectsSlim(buf, rule) + } + buf.WriteString("}}") +} + +// FmtPolicyRulesWithSubjects produces a golang source expression of the value. +func FmtPolicyRulesWithSubjects(rule flowcontrol.PolicyRulesWithSubjects) string { + return "flowcontrolv1beta1.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule) +} + +// FmtPolicyRulesWithSubjectsSlim produces a golang source expression +// of the value but without the leading type name. See above for an +// example context where this is useful. +func FmtPolicyRulesWithSubjectsSlim(rule flowcontrol.PolicyRulesWithSubjects) string { + var buf bytes.Buffer + BufferFmtPolicyRulesWithSubjectsSlim(&buf, rule) + return buf.String() +} + +// BufferFmtPolicyRulesWithSubjectsSlim writes a golang source +// expression for the given value to the given buffer but excludes the +// leading type name +func BufferFmtPolicyRulesWithSubjectsSlim(buf *bytes.Buffer, rule flowcontrol.PolicyRulesWithSubjects) { + buf.WriteString("{Subjects: []flowcontrolv1beta1.Subject{") + for jdx, subj := range rule.Subjects { + if jdx > 0 { + buf.WriteString(", ") + } + buf.WriteString(fmt.Sprintf("{Kind: %q", subj.Kind)) + if subj.User != nil { + buf.WriteString(fmt.Sprintf(", User: &%#+v", *subj.User)) + } + if subj.Group != nil { + buf.WriteString(fmt.Sprintf(", Group: &%#+v", *subj.Group)) + } + if subj.ServiceAccount != nil { + buf.WriteString(fmt.Sprintf(", ServiceAcount: &%#+v", *subj.ServiceAccount)) + } + buf.WriteString("}") + } + buf.WriteString(fmt.Sprintf("}, ResourceRules: %#+v, NonResourceRules: %#+v}", rule.ResourceRules, rule.NonResourceRules)) +} + +// FmtUsers produces a golang source expression of the value. +func FmtUsers(list []user.Info) string { + var buf bytes.Buffer + buf.WriteString("[]user.Info{") + for idx, member := range list { + if idx > 0 { + buf.WriteString(", ") + } + buf.WriteString(fmt.Sprintf("%#+v", member)) + } + buf.WriteString("}") + return buf.String() +} + +// FmtRequests produces a golang source expression of the value. 
+func FmtRequests(list []*request.RequestInfo) string { + var buf bytes.Buffer + buf.WriteString("[]*request.RequestInfo{") + for idx, member := range list { + if idx > 0 { + buf.WriteString(", ") + } + buf.WriteString(fmt.Sprintf("%#+v", member)) + } + buf.WriteString("}") + return buf.String() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go new file mode 100644 index 0000000000..5b5b367bd9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "fmt" + + fcfmt "k8s.io/apiserver/pkg/util/flowcontrol/format" +) + +var _ fmt.GoStringer = RequestDigest{} + +// GoString produces a golang source expression of the value. +func (rd RequestDigest) GoString() string { + return fmt.Sprintf("RequestDigest{RequestInfo: %#+v, User: %#+v}", rd.RequestInfo, rd.User) +} + +var _ fmt.GoStringer = (*priorityLevelState)(nil) + +// GoString produces a golang source expression of the value. +func (pls *priorityLevelState) GoString() string { + if pls == nil { + return "nil" + } + return fmt.Sprintf("&priorityLevelState{pl:%s, qsCompleter:%#+v, queues:%#+v, quiescing:%#v, numPending:%d}", fcfmt.Fmt(pls.pl), pls.qsCompleter, pls.queues, pls.quiescing, pls.numPending) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go new file mode 100644 index 0000000000..bdbaa94601 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go @@ -0,0 +1,261 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "strings" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + basemetricstestutil "k8s.io/component-base/metrics/testutil" +) + +const ( + namespace = "apiserver" + subsystem = "flowcontrol" +) + +const ( + requestKind = "request_kind" + priorityLevel = "priority_level" + flowSchema = "flow_schema" + phase = "phase" + mark = "mark" +) + +var ( + queueLengthBuckets = []float64{0, 10, 25, 50, 100, 250, 500, 1000} + requestDurationSecondsBuckets = []float64{0, 0.005, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 30} +) + +var registerMetrics sync.Once + +// Register all metrics. 
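+// Register is safe to call more than once; the registration itself runs only on
+// the first call.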
+func Register() { + registerMetrics.Do(func() { + for _, metric := range metrics { + legacyregistry.MustRegister(metric) + } + }) +} + +type resettable interface { + Reset() +} + +// Reset all metrics to zero +func Reset() { + for _, metric := range metrics { + rm := metric.(resettable) + rm.Reset() + } +} + +// GatherAndCompare the given metrics with the given Prometheus syntax expected value +func GatherAndCompare(expected string, metricNames ...string) error { + return basemetricstestutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expected), metricNames...) +} + +// Registerables is a slice of Registerable +type Registerables []compbasemetrics.Registerable + +// Append adds more +func (rs Registerables) Append(more ...compbasemetrics.Registerable) Registerables { + return append(rs, more...) +} + +var ( + apiserverRejectedRequestsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "rejected_requests_total", + Help: "Number of requests rejected by API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema, "reason"}, + ) + apiserverDispatchedRequestsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dispatched_requests_total", + Help: "Number of requests released by API Priority and Fairness system for service", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + + // PriorityLevelConcurrencyObserverPairGenerator creates pairs that observe concurrency for priority levels + PriorityLevelConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "priority_level_request_count_samples", + Help: "Periodic observations of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "priority_level_request_count_watermarks", + Help: "Watermarks of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel}) + + // ReadWriteConcurrencyObserverPairGenerator creates pairs that observe concurrency broken down by mutating vs readonly + ReadWriteConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "read_vs_write_request_count_samples", + Help: "Periodic observations of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "read_vs_write_request_count_watermarks", + Help: "Watermarks of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{requestKind}) + + apiserverCurrentInqueueRequests = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "current_inqueue_requests", + Help: "Number of requests currently pending in 
queues of the API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + apiserverRequestQueueLength = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_queue_length_after_enqueue", + Help: "Length of queue in the API Priority and Fairness system, as seen by each request after it is enqueued", + Buckets: queueLengthBuckets, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + apiserverRequestConcurrencyLimit = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_concurrency_limit", + Help: "Shared concurrency limit in the API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel}, + ) + apiserverCurrentExecutingRequests = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "current_executing_requests", + Help: "Number of requests currently executing in the API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + apiserverRequestWaitingSeconds = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_wait_duration_seconds", + Help: "Length of time a request spent waiting in its queue", + Buckets: requestDurationSecondsBuckets, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema, "execute"}, + ) + apiserverRequestExecutionSeconds = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_execution_seconds", + Help: "Duration of request execution in the API Priority and Fairness system", + Buckets: requestDurationSecondsBuckets, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + metrics = Registerables{ + apiserverRejectedRequestsTotal, + apiserverDispatchedRequestsTotal, + apiserverCurrentInqueueRequests, + apiserverRequestQueueLength, + apiserverRequestConcurrencyLimit, + apiserverCurrentExecutingRequests, + apiserverRequestWaitingSeconds, + apiserverRequestExecutionSeconds, + }. + Append(PriorityLevelConcurrencyObserverPairGenerator.metrics()...). + Append(ReadWriteConcurrencyObserverPairGenerator.metrics()...) 
+) + +// AddRequestsInQueues adds the given delta to the gauge of the # of requests in the queues of the specified flowSchema and priorityLevel +func AddRequestsInQueues(priorityLevel, flowSchema string, delta int) { + apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) +} + +// AddRequestsExecuting adds the given delta to the gauge of executing requests of the given flowSchema and priorityLevel +func AddRequestsExecuting(priorityLevel, flowSchema string, delta int) { + apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) +} + +// UpdateSharedConcurrencyLimit updates the value for the concurrency limit in flow control +func UpdateSharedConcurrencyLimit(priorityLevel string, limit int) { + apiserverRequestConcurrencyLimit.WithLabelValues(priorityLevel).Set(float64(limit)) +} + +// AddReject increments the # of rejected requests for flow control +func AddReject(priorityLevel, flowSchema, reason string) { + apiserverRejectedRequestsTotal.WithLabelValues(priorityLevel, flowSchema, reason).Add(1) +} + +// AddDispatch increments the # of dispatched requests for flow control +func AddDispatch(priorityLevel, flowSchema string) { + apiserverDispatchedRequestsTotal.WithLabelValues(priorityLevel, flowSchema).Add(1) +} + +// ObserveQueueLength observes the queue length for flow control +func ObserveQueueLength(priorityLevel, flowSchema string, length int) { + apiserverRequestQueueLength.WithLabelValues(priorityLevel, flowSchema).Observe(float64(length)) +} + +// ObserveWaitingDuration observes the queue length for flow control +func ObserveWaitingDuration(priorityLevel, flowSchema, execute string, waitTime time.Duration) { + apiserverRequestWaitingSeconds.WithLabelValues(priorityLevel, flowSchema, execute).Observe(waitTime.Seconds()) +} + +// ObserveExecutionDuration observes the execution duration for flow control +func ObserveExecutionDuration(priorityLevel, flowSchema string, executionTime time.Duration) { + apiserverRequestExecutionSeconds.WithLabelValues(priorityLevel, flowSchema).Observe(executionTime.Seconds()) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go new file mode 100644 index 0000000000..aade70093a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go @@ -0,0 +1,211 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/klog/v2" +) + +const ( + labelNameMark = "mark" + labelValueLo = "low" + labelValueHi = "high" + labelNamePhase = "phase" + labelValueWaiting = "waiting" + labelValueExecuting = "executing" +) + +// SampleAndWaterMarkPairGenerator makes pairs of TimedObservers that +// track samples and watermarks. 
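+// One observer in each generated pair tracks requests waiting in queues and the
+// other tracks requests executing; both come from the same underlying generator
+// and therefore feed the same histogram vectors.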
+type SampleAndWaterMarkPairGenerator struct { + urGenerator SampleAndWaterMarkObserverGenerator +} + +var _ TimedObserverPairGenerator = SampleAndWaterMarkPairGenerator{} + +// NewSampleAndWaterMarkHistogramsPairGenerator makes a new pair generator +func NewSampleAndWaterMarkHistogramsPairGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkPairGenerator { + return SampleAndWaterMarkPairGenerator{ + urGenerator: NewSampleAndWaterMarkHistogramsGenerator(clock, samplePeriod, sampleOpts, waterMarkOpts, append([]string{labelNamePhase}, labelNames...)), + } +} + +// Generate makes a new pair +func (spg SampleAndWaterMarkPairGenerator) Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair { + return TimedObserverPair{ + RequestsWaiting: spg.urGenerator.Generate(0, waiting1, append([]string{labelValueWaiting}, labelValues...)), + RequestsExecuting: spg.urGenerator.Generate(0, executing1, append([]string{labelValueExecuting}, labelValues...)), + } +} + +func (spg SampleAndWaterMarkPairGenerator) metrics() Registerables { + return spg.urGenerator.metrics() +} + +// SampleAndWaterMarkObserverGenerator creates TimedObservers that +// populate histograms of samples and low- and high-water-marks. The +// generator has a samplePeriod, and the histograms get an observation +// every samplePeriod. The sampling windows are quantized based on +// the monotonic rather than wall-clock times. The `t0` field is +// there so to provide a baseline for monotonic clock differences. +type SampleAndWaterMarkObserverGenerator struct { + *sampleAndWaterMarkObserverGenerator +} + +type sampleAndWaterMarkObserverGenerator struct { + clock clock.PassiveClock + t0 time.Time + samplePeriod time.Duration + samples *compbasemetrics.HistogramVec + waterMarks *compbasemetrics.HistogramVec +} + +var _ TimedObserverGenerator = (*sampleAndWaterMarkObserverGenerator)(nil) + +// NewSampleAndWaterMarkHistogramsGenerator makes a new one +func NewSampleAndWaterMarkHistogramsGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkObserverGenerator { + return SampleAndWaterMarkObserverGenerator{ + &sampleAndWaterMarkObserverGenerator{ + clock: clock, + t0: clock.Now(), + samplePeriod: samplePeriod, + samples: compbasemetrics.NewHistogramVec(sampleOpts, labelNames), + waterMarks: compbasemetrics.NewHistogramVec(waterMarkOpts, append([]string{labelNameMark}, labelNames...)), + }} +} + +func (swg *sampleAndWaterMarkObserverGenerator) quantize(when time.Time) int64 { + return int64(when.Sub(swg.t0) / swg.samplePeriod) +} + +// Generate makes a new TimedObserver +func (swg *sampleAndWaterMarkObserverGenerator) Generate(x, x1 float64, labelValues []string) TimedObserver { + relX := x / x1 + when := swg.clock.Now() + return &sampleAndWaterMarkHistograms{ + sampleAndWaterMarkObserverGenerator: swg, + labelValues: labelValues, + loLabelValues: append([]string{labelValueLo}, labelValues...), + hiLabelValues: append([]string{labelValueHi}, labelValues...), + x1: x1, + sampleAndWaterMarkAccumulator: sampleAndWaterMarkAccumulator{ + lastSet: when, + lastSetInt: swg.quantize(when), + x: x, + relX: relX, + loRelX: relX, + hiRelX: relX, + }} +} + +func (swg *sampleAndWaterMarkObserverGenerator) metrics() Registerables { + return Registerables{swg.samples, swg.waterMarks} +} + +type sampleAndWaterMarkHistograms struct { 
+ *sampleAndWaterMarkObserverGenerator + labelValues []string + loLabelValues, hiLabelValues []string + + sync.Mutex + x1 float64 + sampleAndWaterMarkAccumulator +} + +type sampleAndWaterMarkAccumulator struct { + lastSet time.Time + lastSetInt int64 // lastSet / samplePeriod + x float64 + relX float64 // x / x1 + loRelX, hiRelX float64 +} + +var _ TimedObserver = (*sampleAndWaterMarkHistograms)(nil) + +func (saw *sampleAndWaterMarkHistograms) Add(deltaX float64) { + saw.innerSet(func() { + saw.x += deltaX + }) +} + +func (saw *sampleAndWaterMarkHistograms) Set(x float64) { + saw.innerSet(func() { + saw.x = x + }) +} + +func (saw *sampleAndWaterMarkHistograms) SetX1(x1 float64) { + saw.innerSet(func() { + saw.x1 = x1 + }) +} + +func (saw *sampleAndWaterMarkHistograms) innerSet(updateXOrX1 func()) { + var when time.Time + var whenInt int64 + var acc sampleAndWaterMarkAccumulator + var wellOrdered bool + func() { + saw.Lock() + defer saw.Unlock() + when = saw.clock.Now() + whenInt = saw.quantize(when) + acc = saw.sampleAndWaterMarkAccumulator + wellOrdered = !when.Before(acc.lastSet) + updateXOrX1() + saw.relX = saw.x / saw.x1 + if wellOrdered { + if acc.lastSetInt < whenInt { + saw.loRelX, saw.hiRelX = acc.relX, acc.relX + saw.lastSetInt = whenInt + } + saw.lastSet = when + } + // `wellOrdered` should always be true because we are using + // monotonic clock readings and they never go backwards. Yet + // very small backwards steps (under 1 microsecond) have been + // observed + // (https://github.com/kubernetes/kubernetes/issues/96459). + // In the backwards case, treat the current reading as if it + // had occurred at time `saw.lastSet` and log an error. It + // would be wrong to update `saw.lastSet` in this case because + // that plants a time bomb for future updates to + // `saw.lastSetInt`. + if saw.relX < saw.loRelX { + saw.loRelX = saw.relX + } else if saw.relX > saw.hiRelX { + saw.hiRelX = saw.relX + } + }() + if !wellOrdered { + lastSetS := acc.lastSet.String() + whenS := when.String() + klog.Errorf("Time went backwards from %s to %s for labelValues=%#+v", lastSetS, whenS, saw.labelValues) + } + for acc.lastSetInt < whenInt { + saw.samples.WithLabelValues(saw.labelValues...).Observe(acc.relX) + saw.waterMarks.WithLabelValues(saw.loLabelValues...).Observe(acc.loRelX) + saw.waterMarks.WithLabelValues(saw.hiLabelValues...).Observe(acc.hiRelX) + acc.lastSetInt++ + acc.loRelX, acc.hiRelX = acc.relX, acc.relX + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go new file mode 100644 index 0000000000..25f41493c3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +// TimedObserver gets informed about the values assigned to a variable +// `X float64` over time, and reports on the ratio `X/X1`. 
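+// In this package these observers are implemented by the sample-and-watermark
+// histograms in sample_and_watermark.go: the ratio X/X1 is observed once per
+// sample period, together with the low and high water marks seen since the
+// previous sample.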
+type TimedObserver interface { + // Add notes a change to the variable + Add(deltaX float64) + + // Set notes a setting of the variable + Set(x float64) + + // SetX1 changes the value to use for X1 + SetX1(x1 float64) +} + +// TimedObserverGenerator creates related observers that are +// differentiated by a series of label values +type TimedObserverGenerator interface { + Generate(x, x1 float64, labelValues []string) TimedObserver +} + +// TimedObserverPair is a corresponding pair of observers, one for the +// number of requests waiting in queue(s) and one for the number of +// requests being executed +type TimedObserverPair struct { + // RequestsWaiting is given observations of the number of currently queued requests + RequestsWaiting TimedObserver + + // RequestsExecuting is given observations of the number of requests currently executing + RequestsExecuting TimedObserver +} + +// TimedObserverPairGenerator generates pairs +type TimedObserverPairGenerator interface { + Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go new file mode 100644 index 0000000000..765e28790f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go @@ -0,0 +1,203 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "strings" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// Tests whether a given request and FlowSchema match. Nobody mutates +// either input. 
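+// A match requires some rule of the FlowSchema whose subjects match the request's
+// user and whose resource rules (for resource requests) or non-resource rules
+// (otherwise) match the request.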
+func matchesFlowSchema(digest RequestDigest, flowSchema *flowcontrol.FlowSchema) bool { + for _, policyRule := range flowSchema.Spec.Rules { + if matchesPolicyRule(digest, &policyRule) { + return true + } + } + return false +} + +func matchesPolicyRule(digest RequestDigest, policyRule *flowcontrol.PolicyRulesWithSubjects) bool { + if !matchesASubject(digest.User, policyRule.Subjects) { + return false + } + if digest.RequestInfo.IsResourceRequest { + return matchesAResourceRule(digest.RequestInfo, policyRule.ResourceRules) + } + return matchesANonResourceRule(digest.RequestInfo, policyRule.NonResourceRules) +} + +func matchesASubject(user user.Info, subjects []flowcontrol.Subject) bool { + for _, subject := range subjects { + if matchesSubject(user, subject) { + return true + } + } + return false +} + +func matchesSubject(user user.Info, subject flowcontrol.Subject) bool { + switch subject.Kind { + case flowcontrol.SubjectKindUser: + return subject.User != nil && (subject.User.Name == flowcontrol.NameAll || subject.User.Name == user.GetName()) + case flowcontrol.SubjectKindGroup: + if subject.Group == nil { + return false + } + seek := subject.Group.Name + if seek == "*" { + return true + } + for _, userGroup := range user.GetGroups() { + if userGroup == seek { + return true + } + } + return false + case flowcontrol.SubjectKindServiceAccount: + if subject.ServiceAccount == nil { + return false + } + if subject.ServiceAccount.Name == flowcontrol.NameAll { + return serviceAccountMatchesNamespace(subject.ServiceAccount.Namespace, user.GetName()) + } + return serviceaccount.MatchesUsername(subject.ServiceAccount.Namespace, subject.ServiceAccount.Name, user.GetName()) + default: + return false + } +} + +// serviceAccountMatchesNamespace checks whether the provided service account username matches the namespace, without +// allocating. Use this when checking a service account namespace against a known string. +// This is copied from `k8s.io/apiserver/pkg/authentication/serviceaccount::MatchesUsername` and simplified to not check the name part. 
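+// For example, serviceAccountMatchesNamespace("kube-system",
+// "system:serviceaccount:kube-system:deployer") returns true, while a service
+// account username from any other namespace does not match.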
+func serviceAccountMatchesNamespace(namespace string, username string) bool { + const ( + ServiceAccountUsernamePrefix = "system:serviceaccount:" + ServiceAccountUsernameSeparator = ":" + ) + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return false + } + username = username[len(ServiceAccountUsernamePrefix):] + + if !strings.HasPrefix(username, namespace) { + return false + } + username = username[len(namespace):] + + return strings.HasPrefix(username, ServiceAccountUsernameSeparator) +} + +func matchesAResourceRule(ri *request.RequestInfo, rules []flowcontrol.ResourcePolicyRule) bool { + for _, rr := range rules { + if matchesResourcePolicyRule(ri, rr) { + return true + } + } + return false +} + +func matchesResourcePolicyRule(ri *request.RequestInfo, policyRule flowcontrol.ResourcePolicyRule) bool { + if !matchPolicyRuleVerb(policyRule.Verbs, ri.Verb) { + return false + } + if !matchPolicyRuleResource(policyRule.Resources, ri.Resource, ri.Subresource) { + return false + } + if !matchPolicyRuleAPIGroup(policyRule.APIGroups, ri.APIGroup) { + return false + } + if len(ri.Namespace) == 0 { + return policyRule.ClusterScope + } + return containsString(ri.Namespace, policyRule.Namespaces, flowcontrol.NamespaceEvery) +} + +func matchesANonResourceRule(ri *request.RequestInfo, rules []flowcontrol.NonResourcePolicyRule) bool { + for _, rr := range rules { + if matchesNonResourcePolicyRule(ri, rr) { + return true + } + } + return false +} + +func matchesNonResourcePolicyRule(ri *request.RequestInfo, policyRule flowcontrol.NonResourcePolicyRule) bool { + if !matchPolicyRuleVerb(policyRule.Verbs, ri.Verb) { + return false + } + return matchPolicyRuleNonResourceURL(policyRule.NonResourceURLs, ri.Path) +} + +func matchPolicyRuleVerb(policyRuleVerbs []string, requestVerb string) bool { + return containsString(requestVerb, policyRuleVerbs, flowcontrol.VerbAll) +} + +func matchPolicyRuleNonResourceURL(policyRuleRequestURLs []string, requestPath string) bool { + for _, rulePath := range policyRuleRequestURLs { + if rulePath == flowcontrol.NonResourceAll || rulePath == requestPath { + return true + } + rulePrefix := strings.TrimSuffix(rulePath, "*") + if !strings.HasSuffix(rulePrefix, "/") { + rulePrefix = rulePrefix + "/" + } + if strings.HasPrefix(requestPath, rulePrefix) { + return true + } + } + return false +} + +func matchPolicyRuleAPIGroup(policyRuleAPIGroups []string, requestAPIGroup string) bool { + return containsString(requestAPIGroup, policyRuleAPIGroups, flowcontrol.APIGroupAll) +} + +func rsJoin(requestResource, requestSubresource string) string { + seekString := requestResource + if requestSubresource != "" { + seekString = requestResource + "/" + requestSubresource + } + return seekString +} + +func matchPolicyRuleResource(policyRuleRequestResources []string, requestResource, requestSubresource string) bool { + return containsString(rsJoin(requestResource, requestSubresource), policyRuleRequestResources, flowcontrol.ResourceAll) +} + +// containsString returns true if either `x` or `wildcard` is in +// `list`. The wildcard is not a pattern to match against `x`; rather +// the presence of the wildcard in the list is the caller's way of +// saying that all values of `x` should match the list. This function +// assumes that if `wildcard` is in `list` then it is the only member +// of the list, which is enforced by validation. 
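+// For example, containsString("get", []string{"*"}, "*") and
+// containsString("get", []string{"get", "list"}, "*") both return true, while
+// containsString("get", []string{"list"}, "*") returns false.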
+func containsString(x string, list []string, wildcard string) bool { + if len(list) == 1 && list[0] == wildcard { + return true + } + for _, y := range list { + if x == y { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go b/vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go new file mode 100644 index 0000000000..f81e09a29c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package flushwriter implements a wrapper for a writer that flushes on every +// write if that writer implements the io.Flusher interface +package flushwriter // import "k8s.io/apiserver/pkg/util/flushwriter" diff --git a/vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go b/vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go new file mode 100644 index 0000000000..748bd01085 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flushwriter + +import ( + "io" + "net/http" +) + +// Wrap wraps an io.Writer into a writer that flushes after every write if +// the writer implements the Flusher interface. +func Wrap(w io.Writer) io.Writer { + fw := &flushWriter{ + writer: w, + } + if flusher, ok := w.(http.Flusher); ok { + fw.flusher = flusher + } + return fw +} + +// flushWriter provides wrapper for responseWriter with HTTP streaming capabilities +type flushWriter struct { + flusher http.Flusher + writer io.Writer +} + +// Write is a FlushWriter implementation of the io.Writer that sends any buffered +// data to the client. +func (fw *flushWriter) Write(p []byte) (n int, err error) { + n, err = fw.writer.Write(p) + if err != nil { + return + } + if fw.flusher != nil { + fw.flusher.Flush() + } + return +} diff --git a/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go b/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go new file mode 100644 index 0000000000..0f95a5f794 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "encoding/json" + + "github.com/go-openapi/spec" + "github.com/googleapis/gnostic/compiler" + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + yaml "gopkg.in/yaml.v2" + + "k8s.io/kube-openapi/pkg/util/proto" +) + +// ToProtoModels builds the proto formatted models from OpenAPI spec +func ToProtoModels(openAPISpec *spec.Swagger) (proto.Models, error) { + specBytes, err := json.MarshalIndent(openAPISpec, " ", " ") + if err != nil { + return nil, err + } + + var info yaml.MapSlice + err = yaml.Unmarshal(specBytes, &info) + if err != nil { + return nil, err + } + + doc, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil)) + if err != nil { + return nil, err + } + + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err + } + + return models, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go b/vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go new file mode 100644 index 0000000000..6ef4ed8900 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shufflesharding + +import ( + "fmt" + "math" +) + +// MaxHashBits is the max bit length which can be used from hash value. +// If we use all bits of hash value, the critical(last) card shuffled by +// Dealer will be uneven to 2:3 (first half:second half) at most, +// in order to reduce this unevenness to 32:33, we set MaxHashBits to 60 here. +const MaxHashBits = 60 + +// RequiredEntropyBits makes a quick and slightly conservative estimate of the number +// of bits of hash value that are consumed in shuffle sharding a deck of the given size +// to a hand of the given size. The result is meaningful only if +// 1 <= handSize <= deckSize <= 1<<26. +func RequiredEntropyBits(deckSize, handSize int) int { + return int(math.Ceil(math.Log2(float64(deckSize)) * float64(handSize))) +} + +// Dealer contains some necessary parameters and provides some methods for shuffle sharding. +// Dealer is thread-safe. +type Dealer struct { + deckSize int + handSize int +} + +// NewDealer will create a Dealer with the given deckSize and handSize, will return error when +// deckSize or handSize is invalid as below. +// 1. deckSize or handSize is not positive +// 2. handSize is greater than deckSize +// 3. deckSize is impractically large (greater than 1<<26) +// 4. 
required entropy bits of deckSize and handSize is greater than MaxHashBits +func NewDealer(deckSize, handSize int) (*Dealer, error) { + if deckSize <= 0 || handSize <= 0 { + return nil, fmt.Errorf("deckSize %d or handSize %d is not positive", deckSize, handSize) + } + if handSize > deckSize { + return nil, fmt.Errorf("handSize %d is greater than deckSize %d", handSize, deckSize) + } + if deckSize > 1<<26 { + return nil, fmt.Errorf("deckSize %d is impractically large", deckSize) + } + if RequiredEntropyBits(deckSize, handSize) > MaxHashBits { + return nil, fmt.Errorf("required entropy bits of deckSize %d and handSize %d is greater than %d", deckSize, handSize, MaxHashBits) + } + + return &Dealer{ + deckSize: deckSize, + handSize: handSize, + }, nil +} + +// Deal shuffles a card deck and deals a hand of cards, using the given hashValue as the source of entropy. +// The deck size and hand size are properties of the Dealer. +// This function synchronously makes sequential calls to pick, one for each dealt card. +// Each card is identified by an integer in the range [0, deckSize). +// For example, for deckSize=128 and handSize=4 this function might call pick(14); pick(73); pick(119); pick(26). +func (d *Dealer) Deal(hashValue uint64, pick func(int)) { + // 15 is the largest possible value of handSize + var remainders [15]int + + for i := 0; i < d.handSize; i++ { + hashValueNext := hashValue / uint64(d.deckSize-i) + remainders[i] = int(hashValue - uint64(d.deckSize-i)*hashValueNext) + hashValue = hashValueNext + } + + for i := 0; i < d.handSize; i++ { + card := remainders[i] + for j := i; j > 0; j-- { + if card >= remainders[j-1] { + card++ + } + } + pick(card) + } +} + +// DealIntoHand shuffles and deals according to the Dealer's parameters, +// using the given hashValue as the source of entropy and then +// returns the dealt cards as a slice of `int`. +// If `hand` has the correct length as Dealer's handSize, it will be used as-is and no allocations will be made. +// If `hand` is nil or too small, it will be extended (performing an allocation). +// If `hand` is too large, a sub-slice will be returned. +func (d *Dealer) DealIntoHand(hashValue uint64, hand []int) []int { + h := hand[:0] + d.Deal(hashValue, func(card int) { h = append(h, card) }) + return h +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go new file mode 100644 index 0000000000..965bc8b58c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -0,0 +1,263 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhook + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "strconv" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + utilnet "k8s.io/apimachinery/pkg/util/net" + egressselector "k8s.io/apiserver/pkg/server/egressselector" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// AuthenticationInfoResolverWrapper can be used to inject Dial function to the +// rest.Config generated by the resolver. +type AuthenticationInfoResolverWrapper func(AuthenticationInfoResolver) AuthenticationInfoResolver + +// NewDefaultAuthenticationInfoResolverWrapper builds a default authn resolver wrapper +func NewDefaultAuthenticationInfoResolverWrapper( + proxyTransport *http.Transport, + egressSelector *egressselector.EgressSelector, + kubeapiserverClientConfig *rest.Config) AuthenticationInfoResolverWrapper { + + webhookAuthResolverWrapper := func(delegate AuthenticationInfoResolver) AuthenticationInfoResolver { + return &AuthenticationInfoResolverDelegator{ + ClientConfigForFunc: func(hostPort string) (*rest.Config, error) { + if hostPort == "kubernetes.default.svc:443" { + return kubeapiserverClientConfig, nil + } + ret, err := delegate.ClientConfigFor(hostPort) + if err != nil { + return nil, err + } + + if egressSelector != nil { + networkContext := egressselector.ControlPlane.AsNetworkContext() + var egressDialer utilnet.DialFunc + egressDialer, err = egressSelector.Lookup(networkContext) + + if err != nil { + return nil, err + } + + ret.Dial = egressDialer + } + return ret, nil + }, + ClientConfigForServiceFunc: func(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) { + if serviceName == "kubernetes" && serviceNamespace == corev1.NamespaceDefault && servicePort == 443 { + return kubeapiserverClientConfig, nil + } + ret, err := delegate.ClientConfigForService(serviceName, serviceNamespace, servicePort) + if err != nil { + return nil, err + } + + if egressSelector != nil { + networkContext := egressselector.Cluster.AsNetworkContext() + var egressDialer utilnet.DialFunc + egressDialer, err = egressSelector.Lookup(networkContext) + if err != nil { + return nil, err + } + + ret.Dial = egressDialer + } else if proxyTransport != nil && proxyTransport.DialContext != nil { + ret.Dial = proxyTransport.DialContext + } + return ret, nil + }, + } + } + return webhookAuthResolverWrapper +} + +// AuthenticationInfoResolver builds rest.Config base on the server or service +// name and service namespace. +type AuthenticationInfoResolver interface { + // ClientConfigFor builds rest.Config based on the hostPort. + ClientConfigFor(hostPort string) (*rest.Config, error) + // ClientConfigForService builds rest.Config based on the serviceName and + // serviceNamespace. + ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) +} + +// AuthenticationInfoResolverDelegator implements AuthenticationInfoResolver. +type AuthenticationInfoResolverDelegator struct { + ClientConfigForFunc func(hostPort string) (*rest.Config, error) + ClientConfigForServiceFunc func(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) +} + +// ClientConfigFor returns client config for given hostPort. +func (a *AuthenticationInfoResolverDelegator) ClientConfigFor(hostPort string) (*rest.Config, error) { + return a.ClientConfigForFunc(hostPort) +} + +// ClientConfigForService returns client config for given service. 
+func (a *AuthenticationInfoResolverDelegator) ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) { + return a.ClientConfigForServiceFunc(serviceName, serviceNamespace, servicePort) +} + +type defaultAuthenticationInfoResolver struct { + kubeconfig clientcmdapi.Config +} + +// NewDefaultAuthenticationInfoResolver generates an AuthenticationInfoResolver +// that builds rest.Config based on the kubeconfig file. kubeconfigFile is the +// path to the kubeconfig. +func NewDefaultAuthenticationInfoResolver(kubeconfigFile string) (AuthenticationInfoResolver, error) { + if len(kubeconfigFile) == 0 { + return &defaultAuthenticationInfoResolver{}, nil + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.ExplicitPath = kubeconfigFile + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + clientConfig, err := loader.RawConfig() + if err != nil { + return nil, err + } + + return &defaultAuthenticationInfoResolver{kubeconfig: clientConfig}, nil +} + +func (c *defaultAuthenticationInfoResolver) ClientConfigFor(hostPort string) (*rest.Config, error) { + return c.clientConfig(hostPort) +} + +func (c *defaultAuthenticationInfoResolver) ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) { + return c.clientConfig(net.JoinHostPort(serviceName+"."+serviceNamespace+".svc", strconv.Itoa(servicePort))) +} + +func (c *defaultAuthenticationInfoResolver) clientConfig(target string) (*rest.Config, error) { + // exact match + if authConfig, ok := c.kubeconfig.AuthInfos[target]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // star prefixed match + serverSteps := strings.Split(target, ".") + for i := 1; i < len(serverSteps); i++ { + nickName := "*." + strings.Join(serverSteps[i:], ".") + if authConfig, ok := c.kubeconfig.AuthInfos[nickName]; ok { + return restConfigFromKubeconfig(authConfig) + } + } + + // If target included the default https port (443), search again without the port + if target, port, err := net.SplitHostPort(target); err == nil && port == "443" { + // exact match without port + if authConfig, ok := c.kubeconfig.AuthInfos[target]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // star prefixed match without port + serverSteps := strings.Split(target, ".") + for i := 1; i < len(serverSteps); i++ { + nickName := "*." + strings.Join(serverSteps[i:], ".") + if authConfig, ok := c.kubeconfig.AuthInfos[nickName]; ok { + return restConfigFromKubeconfig(authConfig) + } + } + } + + // if we're trying to hit the kube-apiserver and there wasn't an explicit config, use the in-cluster config + if target == "kubernetes.default.svc:443" { + // if we can find an in-cluster-config use that. If we can't, fall through. 
+ inClusterConfig, err := rest.InClusterConfig() + if err == nil { + return setGlobalDefaults(inClusterConfig), nil + } + } + + // star (default) match + if authConfig, ok := c.kubeconfig.AuthInfos["*"]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // use the current context from the kubeconfig if possible + if len(c.kubeconfig.CurrentContext) > 0 { + if currContext, ok := c.kubeconfig.Contexts[c.kubeconfig.CurrentContext]; ok { + if len(currContext.AuthInfo) > 0 { + if currAuth, ok := c.kubeconfig.AuthInfos[currContext.AuthInfo]; ok { + return restConfigFromKubeconfig(currAuth) + } + } + } + } + + // anonymous + return setGlobalDefaults(&rest.Config{}), nil +} + +func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Config, error) { + config := &rest.Config{} + + // blindly overwrite existing values based on precedence + if len(configAuthInfo.Token) > 0 { + config.BearerToken = configAuthInfo.Token + config.BearerTokenFile = configAuthInfo.TokenFile + } else if len(configAuthInfo.TokenFile) > 0 { + tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + if err != nil { + return nil, err + } + config.BearerToken = string(tokenBytes) + config.BearerTokenFile = configAuthInfo.TokenFile + } + if len(configAuthInfo.Impersonate) > 0 { + config.Impersonate = rest.ImpersonationConfig{ + UserName: configAuthInfo.Impersonate, + Groups: configAuthInfo.ImpersonateGroups, + Extra: configAuthInfo.ImpersonateUserExtra, + } + } + if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { + config.CertFile = configAuthInfo.ClientCertificate + config.CertData = configAuthInfo.ClientCertificateData + config.KeyFile = configAuthInfo.ClientKey + config.KeyData = configAuthInfo.ClientKeyData + } + if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { + config.Username = configAuthInfo.Username + config.Password = configAuthInfo.Password + } + if configAuthInfo.Exec != nil { + config.ExecProvider = configAuthInfo.Exec.DeepCopy() + } + if configAuthInfo.AuthProvider != nil { + return nil, fmt.Errorf("auth provider not supported") + } + + return setGlobalDefaults(config), nil +} + +func setGlobalDefaults(config *rest.Config) *rest.Config { + config.UserAgent = "kube-apiserver-admission" + config.Timeout = 30 * time.Second + + return config +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/client.go b/vendor/k8s.io/apiserver/pkg/util/webhook/client.go new file mode 100644 index 0000000000..d750c3f7c0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/client.go @@ -0,0 +1,224 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhook + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/url" + "strconv" + + "github.com/hashicorp/golang-lru" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/rest" +) + +const ( + defaultCacheSize = 200 +) + +// ClientConfig defines parameters required for creating a hook client. +type ClientConfig struct { + Name string + URL string + CABundle []byte + Service *ClientConfigService +} + +// ClientConfigService defines service discovery parameters of the webhook. +type ClientConfigService struct { + Name string + Namespace string + Path string + Port int32 +} + +// ClientManager builds REST clients to talk to webhooks. It caches the clients +// to avoid duplicate creation. +type ClientManager struct { + authInfoResolver AuthenticationInfoResolver + serviceResolver ServiceResolver + negotiatedSerializer runtime.NegotiatedSerializer + cache *lru.Cache +} + +// NewClientManager creates a clientManager. +func NewClientManager(gvs []schema.GroupVersion, addToSchemaFuncs ...func(s *runtime.Scheme) error) (ClientManager, error) { + cache, err := lru.New(defaultCacheSize) + if err != nil { + return ClientManager{}, err + } + hookScheme := runtime.NewScheme() + for _, addToSchemaFunc := range addToSchemaFuncs { + if err := addToSchemaFunc(hookScheme); err != nil { + return ClientManager{}, err + } + } + return ClientManager{ + cache: cache, + negotiatedSerializer: serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{ + Serializer: serializer.NewCodecFactory(hookScheme).LegacyCodec(gvs...), + }), + }, nil +} + +// SetAuthenticationInfoResolverWrapper sets the +// AuthenticationInfoResolverWrapper. +func (cm *ClientManager) SetAuthenticationInfoResolverWrapper(wrapper AuthenticationInfoResolverWrapper) { + if wrapper != nil { + cm.authInfoResolver = wrapper(cm.authInfoResolver) + } +} + +// SetAuthenticationInfoResolver sets the AuthenticationInfoResolver. +func (cm *ClientManager) SetAuthenticationInfoResolver(resolver AuthenticationInfoResolver) { + cm.authInfoResolver = resolver +} + +// SetServiceResolver sets the ServiceResolver. +func (cm *ClientManager) SetServiceResolver(sr ServiceResolver) { + if sr != nil { + cm.serviceResolver = sr + } +} + +// Validate checks if ClientManager is properly set up. +func (cm *ClientManager) Validate() error { + var errs []error + if cm.negotiatedSerializer == nil { + errs = append(errs, fmt.Errorf("the clientManager requires a negotiatedSerializer")) + } + if cm.serviceResolver == nil { + errs = append(errs, fmt.Errorf("the clientManager requires a serviceResolver")) + } + if cm.authInfoResolver == nil { + errs = append(errs, fmt.Errorf("the clientManager requires an authInfoResolver")) + } + return utilerrors.NewAggregate(errs) +} + +// HookClient get a RESTClient from the cache, or constructs one based on the +// webhook configuration. +func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) { + ccWithNoName := cc + ccWithNoName.Name = "" + cacheKey, err := json.Marshal(ccWithNoName) + if err != nil { + return nil, err + } + if client, ok := cm.cache.Get(string(cacheKey)); ok { + return client.(*rest.RESTClient), nil + } + + complete := func(cfg *rest.Config) (*rest.RESTClient, error) { + // Avoid client-side rate limiting talking to the webhook backend. + // Rate limiting should happen when deciding how many requests to serve. 
+ cfg.QPS = -1 + + // Combine CAData from the config with any existing CA bundle provided + if len(cfg.TLSClientConfig.CAData) > 0 { + cfg.TLSClientConfig.CAData = append(cfg.TLSClientConfig.CAData, '\n') + } + cfg.TLSClientConfig.CAData = append(cfg.TLSClientConfig.CAData, cc.CABundle...) + + // Use http/1.1 instead of http/2. + // This is a workaround for http/2-enabled clients not load-balancing concurrent requests to multiple backends. + // See http://issue.k8s.io/75791 for details. + cfg.NextProtos = []string{"http/1.1"} + + cfg.ContentConfig.NegotiatedSerializer = cm.negotiatedSerializer + cfg.ContentConfig.ContentType = runtime.ContentTypeJSON + client, err := rest.UnversionedRESTClientFor(cfg) + if err == nil { + cm.cache.Add(string(cacheKey), client) + } + return client, err + } + + if cc.Service != nil { + port := cc.Service.Port + if port == 0 { + // Default to port 443 if no service port is specified + port = 443 + } + + restConfig, err := cm.authInfoResolver.ClientConfigForService(cc.Service.Name, cc.Service.Namespace, int(port)) + if err != nil { + return nil, err + } + cfg := rest.CopyConfig(restConfig) + serverName := cc.Service.Name + "." + cc.Service.Namespace + ".svc" + + host := net.JoinHostPort(serverName, strconv.Itoa(int(port))) + cfg.Host = "https://" + host + cfg.APIPath = cc.Service.Path + // Set the server name if not already set + if len(cfg.TLSClientConfig.ServerName) == 0 { + cfg.TLSClientConfig.ServerName = serverName + } + + delegateDialer := cfg.Dial + if delegateDialer == nil { + var d net.Dialer + delegateDialer = d.DialContext + } + cfg.Dial = func(ctx context.Context, network, addr string) (net.Conn, error) { + if addr == host { + u, err := cm.serviceResolver.ResolveEndpoint(cc.Service.Namespace, cc.Service.Name, port) + if err != nil { + return nil, err + } + addr = u.Host + } + return delegateDialer(ctx, network, addr) + } + + return complete(cfg) + } + + if cc.URL == "" { + return nil, &ErrCallingWebhook{WebhookName: cc.Name, Reason: errors.New("webhook configuration must have either service or URL")} + } + + u, err := url.Parse(cc.URL) + if err != nil { + return nil, &ErrCallingWebhook{WebhookName: cc.Name, Reason: fmt.Errorf("Unparsable URL: %v", err)} + } + + hostPort := u.Host + if len(u.Port()) == 0 { + // Default to port 443 if no port is specified + hostPort = net.JoinHostPort(hostPort, "443") + } + + restConfig, err := cm.authInfoResolver.ClientConfigFor(hostPort) + if err != nil { + return nil, err + } + + cfg := rest.CopyConfig(restConfig) + cfg.Host = u.Scheme + "://" + u.Host + cfg.APIPath = u.Path + + return complete(cfg) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/error.go b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go new file mode 100644 index 0000000000..9c6f780e2f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go @@ -0,0 +1,47 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhook + +import ( + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +// ErrCallingWebhook is returned for transport-layer errors calling webhooks. It +// represents a failure to talk to the webhook, not the webhook rejecting a +// request. +type ErrCallingWebhook struct { + WebhookName string + Reason error +} + +func (e *ErrCallingWebhook) Error() string { + if e.Reason != nil { + return fmt.Sprintf("failed calling webhook %q: %v", e.WebhookName, e.Reason) + } + return fmt.Sprintf("failed calling webhook %q; no further details available", e.WebhookName) +} + +// ErrWebhookRejection represents a webhook properly rejecting a request. +type ErrWebhookRejection struct { + Status *apierrors.StatusError +} + +func (e *ErrWebhookRejection) Error() string { + return e.Status.Error() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh b/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh new file mode 100644 index 0000000000..de40773011 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# gencerts.sh generates the certificates for the webhook tests. +# +# It is not expected to be run often (there is no go generate rule), and mainly +# exists for documentation purposes. 
+ +CN_BASE="webhook_tests" + +cat > server.conf << EOF +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth, serverAuth +subjectAltName = @alt_names +[alt_names] +IP.1 = 127.0.0.1 +EOF + +cat > client.conf << EOF +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth, serverAuth +subjectAltName = @alt_names +[alt_names] +IP.1 = 127.0.0.1 +EOF + +# Create a certificate authority +openssl genrsa -out caKey.pem 2048 +openssl req -x509 -new -nodes -key caKey.pem -days 100000 -out caCert.pem -subj "/CN=${CN_BASE}_ca" + +# Create a second certificate authority +openssl genrsa -out badCAKey.pem 2048 +openssl req -x509 -new -nodes -key badCAKey.pem -days 100000 -out badCACert.pem -subj "/CN=${CN_BASE}_ca" + +# Create a server certiticate +openssl genrsa -out serverKey.pem 2048 +openssl req -new -key serverKey.pem -out server.csr -subj "/CN=${CN_BASE}_server" -config server.conf +openssl x509 -req -in server.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCert.pem -days 100000 -extensions v3_req -extfile server.conf + +# Create a client certiticate +openssl genrsa -out clientKey.pem 2048 +openssl req -new -key clientKey.pem -out client.csr -subj "/CN=${CN_BASE}_client" -config client.conf +openssl x509 -req -in client.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out clientCert.pem -days 100000 -extensions v3_req -extfile client.conf + +outfile=certs_test.go + +cat > $outfile << EOF +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated using openssl by the gencerts.sh script +// and holds raw certificates for the webhook tests. + +package webhook +EOF + +for file in caKey caCert badCAKey badCACert serverKey serverCert clientKey clientCert; do + data=$(cat ${file}.pem) + echo "" >> $outfile + echo "var $file = []byte(\`$data\`)" >> $outfile +done + +# Clean up after we're done. +rm ./*.pem +rm ./*.csr +rm ./*.srl +rm ./*.conf diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go b/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go new file mode 100644 index 0000000000..da140b1f0d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go @@ -0,0 +1,47 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "errors" + "fmt" + "net/url" +) + +// ServiceResolver knows how to convert a service reference into an actual location. +type ServiceResolver interface { + ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) +} + +type defaultServiceResolver struct{} + +// NewDefaultServiceResolver creates a new default server resolver. +func NewDefaultServiceResolver() ServiceResolver { + return &defaultServiceResolver{} +} + +// ResolveEndpoint constructs a service URL from a given namespace and name +// note that the name, namespace, and port are required and by default all +// created addresses use HTTPS scheme. +// for example: +// name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443 +func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) { + if len(name) == 0 || len(namespace) == 0 || port == 0 { + return nil, errors.New("cannot resolve an empty service name or namespace or port") + } + return &url.URL{Scheme: "https", Host: fmt.Sprintf("%s.%s.svc:%d", name, namespace, port)}, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go new file mode 100644 index 0000000000..c46cc9d6b8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go @@ -0,0 +1,105 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateWebhookURL validates webhook's URL. 
+func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList { + var allErrors field.ErrorList + const form = "; desired format: https://host[/path]" + if u, err := url.Parse(URL); err != nil { + allErrors = append(allErrors, field.Required(fldPath, "url must be a valid URL: "+err.Error()+form)) + } else { + if forceHttps && u.Scheme != "https" { + allErrors = append(allErrors, field.Invalid(fldPath, u.Scheme, "'https' is the only allowed URL scheme"+form)) + } + if len(u.Host) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Host, "host must be provided"+form)) + } + if u.User != nil { + allErrors = append(allErrors, field.Invalid(fldPath, u.User.String(), "user information is not permitted in the URL")) + } + if len(u.Fragment) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Fragment, "fragments are not permitted in the URL")) + } + if len(u.RawQuery) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.RawQuery, "query parameters are not permitted in the URL")) + } + } + return allErrors +} + +func ValidateWebhookService(fldPath *field.Path, namespace, name string, path *string, port int32) field.ErrorList { + var allErrors field.ErrorList + + if len(name) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("name"), "service name is required")) + } + + if len(namespace) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("namespace"), "service namespace is required")) + } + + if errs := validation.IsValidPortNum(int(port)); errs != nil { + allErrors = append(allErrors, field.Invalid(fldPath.Child("port"), port, "port is not valid: "+strings.Join(errs, ", "))) + } + + if path == nil { + return allErrors + } + + // TODO: replace below with url.Parse + verifying that host is empty? + + urlPath := *path + if urlPath == "/" || len(urlPath) == 0 { + return allErrors + } + if urlPath == "//" { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "segment[0] may not be empty")) + return allErrors + } + + if !strings.HasPrefix(urlPath, "/") { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "must start with a '/'")) + } + + urlPathToCheck := urlPath[1:] + if strings.HasSuffix(urlPathToCheck, "/") { + urlPathToCheck = urlPathToCheck[:len(urlPathToCheck)-1] + } + steps := strings.Split(urlPathToCheck, "/") + for i, step := range steps { + if len(step) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d] may not be empty", i))) + continue + } + failures := validation.IsDNS1123Subdomain(step) + for _, failure := range failures { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d]: %v", i, failure))) + } + } + + return allErrors +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go new file mode 100644 index 0000000000..abaade352b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook implements a generic HTTP webhook plugin. +package webhook + +import ( + "context" + "fmt" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// defaultRequestTimeout is set for all webhook request. This is the absolute +// timeout of the HTTP request, including reading the response body. +const defaultRequestTimeout = 30 * time.Second + +// DefaultRetryBackoffWithInitialDelay returns the default backoff parameters for webhook retry from a given initial delay. +// Handy for the client that provides a custom initial delay only. +func DefaultRetryBackoffWithInitialDelay(initialBackoffDelay time.Duration) wait.Backoff { + return wait.Backoff{ + Duration: initialBackoffDelay, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + } +} + +// GenericWebhook defines a generic client for webhooks with commonly used capabilities, +// such as retry requests. +type GenericWebhook struct { + RestClient *rest.RESTClient + RetryBackoff wait.Backoff + ShouldRetry func(error) bool +} + +// DefaultShouldRetry is a default implementation for the GenericWebhook ShouldRetry function property. +// If the error reason is one of: networking (connection reset) or http (InternalServerError (500), GatewayTimeout (504), TooManyRequests (429)), +// or apierrors.SuggestsClientDelay() returns true, then the function advises a retry. +// Otherwise it returns false for an immediate fail. +func DefaultShouldRetry(err error) bool { + // these errors indicate a transient error that should be retried. + if utilnet.IsConnectionReset(err) || apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) { + return true + } + // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. + if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry { + return true + } + return false +} + +// NewGenericWebhook creates a new GenericWebhook from the provided kubeconfig file. 
+func NewGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, kubeConfigFile string, groupVersions []schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*GenericWebhook, error) { + return newGenericWebhook(scheme, codecFactory, kubeConfigFile, groupVersions, retryBackoff, defaultRequestTimeout, customDial) +} + +func newGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, kubeConfigFile string, groupVersions []schema.GroupVersion, retryBackoff wait.Backoff, requestTimeout time.Duration, customDial utilnet.DialFunc) (*GenericWebhook, error) { + for _, groupVersion := range groupVersions { + if !scheme.IsVersionRegistered(groupVersion) { + return nil, fmt.Errorf("webhook plugin requires enabling extension resource: %s", groupVersion) + } + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.ExplicitPath = kubeConfigFile + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err := loader.ClientConfig() + if err != nil { + return nil, err + } + + // Kubeconfigs can't set a timeout, this can only be set through a command line flag. + // + // https://github.com/kubernetes/client-go/blob/master/tools/clientcmd/overrides.go + // + // Set this to something reasonable so request to webhooks don't hang forever. + clientConfig.Timeout = requestTimeout + + // Avoid client-side rate limiting talking to the webhook backend. + // Rate limiting should happen when deciding how many requests to serve. + clientConfig.QPS = -1 + + codec := codecFactory.LegacyCodec(groupVersions...) + clientConfig.ContentConfig.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) + + clientConfig.Dial = customDial + + restClient, err := rest.UnversionedRESTClientFor(clientConfig) + if err != nil { + return nil, err + } + + return &GenericWebhook{restClient, retryBackoff, DefaultShouldRetry}, nil +} + +// WithExponentialBackoff will retry webhookFn() as specified by the given backoff parameters with exponentially +// increasing backoff when it returns an error for which this GenericWebhook's ShouldRetry function returns true, +// confirming it to be retriable. If no ShouldRetry has been defined for the webhook, +// then the default one is used (DefaultShouldRetry). +func (g *GenericWebhook) WithExponentialBackoff(ctx context.Context, webhookFn func() rest.Result) rest.Result { + var result rest.Result + shouldRetry := g.ShouldRetry + if shouldRetry == nil { + shouldRetry = DefaultShouldRetry + } + WithExponentialBackoff(ctx, g.RetryBackoff, func() error { + result = webhookFn() + return result.Error() + }, shouldRetry) + return result +} + +// WithExponentialBackoff will retry webhookFn up to 5 times with exponentially increasing backoff when +// it returns an error for which shouldRetry returns true, confirming it to be retriable. +func WithExponentialBackoff(ctx context.Context, retryBackoff wait.Backoff, webhookFn func() error, shouldRetry func(error) bool) error { + // having a webhook error allows us to track the last actual webhook error for requests that + // are later cancelled or time out. 
+ var webhookErr error + err := wait.ExponentialBackoffWithContext(ctx, retryBackoff, func() (bool, error) { + webhookErr = webhookFn() + if shouldRetry(webhookErr) { + return false, nil + } + if webhookErr != nil { + return false, webhookErr + } + return true, nil + }) + + switch { + // we check for webhookErr first, if webhookErr is set it's the most important error to return. + case webhookErr != nil: + return webhookErr + case err != nil: + return fmt.Errorf("webhook call failed: %s", err.Error()) + default: + return nil + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go new file mode 100644 index 0000000000..11474bfffd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go @@ -0,0 +1,352 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wsstream + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "regexp" + "strings" + "time" + + "golang.org/x/net/websocket" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// The Websocket subprotocol "channel.k8s.io" prepends each binary message with a byte indicating +// the channel number (zero indexed) the message was sent on. Messages in both directions should +// prefix their messages with this channel byte. When used for remote execution, the channel numbers +// are by convention defined to match the POSIX file-descriptors assigned to STDIN, STDOUT, and STDERR +// (0, 1, and 2). No other conversion is performed on the raw subprotocol - writes are sent as they +// are received by the server. +// +// Example client session: +// +// CONNECT http://server.com with subprotocol "channel.k8s.io" +// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) +// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT) +// CLOSE +// +const ChannelWebSocketProtocol = "channel.k8s.io" + +// The Websocket subprotocol "base64.channel.k8s.io" base64 encodes each message with a character +// indicating the channel number (zero indexed) the message was sent on. Messages in both directions +// should prefix their messages with this channel char. When used for remote execution, the channel +// numbers are by convention defined to match the POSIX file-descriptors assigned to STDIN, STDOUT, +// and STDERR ('0', '1', and '2'). The data received on the server is base64 decoded (and must be +// be valid) and data written by the server to the client is base64 encoded. 
+// +// Example client session: +// +// CONNECT http://server.com with subprotocol "base64.channel.k8s.io" +// WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN) +// READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT) +// CLOSE +// +const Base64ChannelWebSocketProtocol = "base64.channel.k8s.io" + +type codecType int + +const ( + rawCodec codecType = iota + base64Codec +) + +type ChannelType int + +const ( + IgnoreChannel ChannelType = iota + ReadChannel + WriteChannel + ReadWriteChannel +) + +var ( + // connectionUpgradeRegex matches any Connection header value that includes upgrade + connectionUpgradeRegex = regexp.MustCompile("(^|.*,\\s*)upgrade($|\\s*,)") +) + +// IsWebSocketRequest returns true if the incoming request contains connection upgrade headers +// for WebSockets. +func IsWebSocketRequest(req *http.Request) bool { + if !strings.EqualFold(req.Header.Get("Upgrade"), "websocket") { + return false + } + return connectionUpgradeRegex.MatchString(strings.ToLower(req.Header.Get("Connection"))) +} + +// IgnoreReceives reads from a WebSocket until it is closed, then returns. If timeout is set, the +// read and write deadlines are pushed every time a new message is received. +func IgnoreReceives(ws *websocket.Conn, timeout time.Duration) { + defer runtime.HandleCrash() + var data []byte + for { + resetTimeout(ws, timeout) + if err := websocket.Message.Receive(ws, &data); err != nil { + return + } + } +} + +// handshake ensures the provided user protocol matches one of the allowed protocols. It returns +// no error if no protocol is specified. +func handshake(config *websocket.Config, req *http.Request, allowed []string) error { + protocols := config.Protocol + if len(protocols) == 0 { + protocols = []string{""} + } + + for _, protocol := range protocols { + for _, allow := range allowed { + if allow == protocol { + config.Protocol = []string{protocol} + return nil + } + } + } + + return fmt.Errorf("requested protocol(s) are not supported: %v; supports %v", config.Protocol, allowed) +} + +// ChannelProtocolConfig describes a websocket subprotocol with channels. +type ChannelProtocolConfig struct { + Binary bool + Channels []ChannelType +} + +// NewDefaultChannelProtocols returns a channel protocol map with the +// subprotocols "", "channel.k8s.io", "base64.channel.k8s.io" and the given +// channels. +func NewDefaultChannelProtocols(channels []ChannelType) map[string]ChannelProtocolConfig { + return map[string]ChannelProtocolConfig{ + "": {Binary: true, Channels: channels}, + ChannelWebSocketProtocol: {Binary: true, Channels: channels}, + Base64ChannelWebSocketProtocol: {Binary: false, Channels: channels}, + } +} + +// Conn supports sending multiple binary channels over a websocket connection. +type Conn struct { + protocols map[string]ChannelProtocolConfig + selectedProtocol string + channels []*websocketChannel + codec codecType + ready chan struct{} + ws *websocket.Conn + timeout time.Duration +} + +// NewConn creates a WebSocket connection that supports a set of channels. Channels begin each +// web socket message with a single byte indicating the channel number (0-N). 255 is reserved for +// future use. The channel types for each channel are passed as an array, supporting the different +// duplex modes. Read and Write refer to whether the channel can be used as a Reader or Writer. +// +// The protocols parameter maps subprotocol names to ChannelProtocols. 
The empty string subprotocol +// name is used if websocket.Config.Protocol is empty. +func NewConn(protocols map[string]ChannelProtocolConfig) *Conn { + return &Conn{ + ready: make(chan struct{}), + protocols: protocols, + } +} + +// SetIdleTimeout sets the interval for both reads and writes before timeout. If not specified, +// there is no timeout on the connection. +func (conn *Conn) SetIdleTimeout(duration time.Duration) { + conn.timeout = duration +} + +// Open the connection and create channels for reading and writing. It returns +// the selected subprotocol, a slice of channels and an error. +func (conn *Conn) Open(w http.ResponseWriter, req *http.Request) (string, []io.ReadWriteCloser, error) { + go func() { + defer runtime.HandleCrash() + defer conn.Close() + websocket.Server{Handshake: conn.handshake, Handler: conn.handle}.ServeHTTP(w, req) + }() + <-conn.ready + rwc := make([]io.ReadWriteCloser, len(conn.channels)) + for i := range conn.channels { + rwc[i] = conn.channels[i] + } + return conn.selectedProtocol, rwc, nil +} + +func (conn *Conn) initialize(ws *websocket.Conn) { + negotiated := ws.Config().Protocol + conn.selectedProtocol = negotiated[0] + p := conn.protocols[conn.selectedProtocol] + if p.Binary { + conn.codec = rawCodec + } else { + conn.codec = base64Codec + } + conn.ws = ws + conn.channels = make([]*websocketChannel, len(p.Channels)) + for i, t := range p.Channels { + switch t { + case ReadChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), true, false) + case WriteChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), false, true) + case ReadWriteChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), true, true) + case IgnoreChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), false, false) + } + } + + close(conn.ready) +} + +func (conn *Conn) handshake(config *websocket.Config, req *http.Request) error { + supportedProtocols := make([]string, 0, len(conn.protocols)) + for p := range conn.protocols { + supportedProtocols = append(supportedProtocols, p) + } + return handshake(config, req, supportedProtocols) +} + +func (conn *Conn) resetTimeout() { + if conn.timeout > 0 { + conn.ws.SetDeadline(time.Now().Add(conn.timeout)) + } +} + +// Close is only valid after Open has been called +func (conn *Conn) Close() error { + <-conn.ready + for _, s := range conn.channels { + s.Close() + } + conn.ws.Close() + return nil +} + +// handle implements a websocket handler. 
+func (conn *Conn) handle(ws *websocket.Conn) { + defer conn.Close() + conn.initialize(ws) + + for { + conn.resetTimeout() + var data []byte + if err := websocket.Message.Receive(ws, &data); err != nil { + if err != io.EOF { + klog.Errorf("Error on socket receive: %v", err) + } + break + } + if len(data) == 0 { + continue + } + channel := data[0] + if conn.codec == base64Codec { + channel = channel - '0' + } + data = data[1:] + if int(channel) >= len(conn.channels) { + klog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) + continue + } + if _, err := conn.channels[channel].DataFromSocket(data); err != nil { + klog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) + continue + } + } +} + +// write multiplexes the specified channel onto the websocket +func (conn *Conn) write(num byte, data []byte) (int, error) { + conn.resetTimeout() + switch conn.codec { + case rawCodec: + frame := make([]byte, len(data)+1) + frame[0] = num + copy(frame[1:], data) + if err := websocket.Message.Send(conn.ws, frame); err != nil { + return 0, err + } + case base64Codec: + frame := string('0'+num) + base64.StdEncoding.EncodeToString(data) + if err := websocket.Message.Send(conn.ws, frame); err != nil { + return 0, err + } + } + return len(data), nil +} + +// websocketChannel represents a channel in a connection +type websocketChannel struct { + conn *Conn + num byte + r io.Reader + w io.WriteCloser + + read, write bool +} + +// newWebsocketChannel creates a pipe for writing to a websocket. Do not write to this pipe +// prior to the connection being opened. It may be no, half, or full duplex depending on +// read and write. +func newWebsocketChannel(conn *Conn, num byte, read, write bool) *websocketChannel { + r, w := io.Pipe() + return &websocketChannel{conn, num, r, w, read, write} +} + +func (p *websocketChannel) Write(data []byte) (int, error) { + if !p.write { + return len(data), nil + } + return p.conn.write(p.num, data) +} + +// DataFromSocket is invoked by the connection receiver to move data from the connection +// into a specific channel. +func (p *websocketChannel) DataFromSocket(data []byte) (int, error) { + if !p.read { + return len(data), nil + } + + switch p.conn.codec { + case rawCodec: + return p.w.Write(data) + case base64Codec: + dst := make([]byte, len(data)) + n, err := base64.StdEncoding.Decode(dst, data) + if err != nil { + return 0, err + } + return p.w.Write(dst[:n]) + } + return 0, nil +} + +func (p *websocketChannel) Read(data []byte) (int, error) { + if !p.read { + return 0, io.EOF + } + return p.r.Read(data) +} + +func (p *websocketChannel) Close() error { + return p.w.Close() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go new file mode 100644 index 0000000000..694ce81d20 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package wsstream contains utilities for streaming content over WebSockets. +// The Conn type allows callers to multiplex multiple read/write channels over +// a single websocket. The Reader type allows an io.Reader to be copied over +// a websocket channel as binary content. +package wsstream // import "k8s.io/apiserver/pkg/util/wsstream" diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go new file mode 100644 index 0000000000..ba7e6a519a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go @@ -0,0 +1,177 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wsstream + +import ( + "encoding/base64" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/websocket" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// The WebSocket subprotocol "binary.k8s.io" will only send messages to the +// client and ignore messages sent to the server. The received messages are +// the exact bytes written to the stream. Zero byte messages are possible. +const binaryWebSocketProtocol = "binary.k8s.io" + +// The WebSocket subprotocol "base64.binary.k8s.io" will only send messages to the +// client and ignore messages sent to the server. The received messages are +// a base64 version of the bytes written to the stream. Zero byte messages are +// possible. +const base64BinaryWebSocketProtocol = "base64.binary.k8s.io" + +// ReaderProtocolConfig describes a websocket subprotocol with one stream. +type ReaderProtocolConfig struct { + Binary bool +} + +// NewDefaultReaderProtocols returns a stream protocol map with the +// subprotocols "", "channel.k8s.io", "base64.channel.k8s.io". +func NewDefaultReaderProtocols() map[string]ReaderProtocolConfig { + return map[string]ReaderProtocolConfig{ + "": {Binary: true}, + binaryWebSocketProtocol: {Binary: true}, + base64BinaryWebSocketProtocol: {Binary: false}, + } +} + +// Reader supports returning an arbitrary byte stream over a websocket channel. +type Reader struct { + err chan error + r io.Reader + ping bool + timeout time.Duration + protocols map[string]ReaderProtocolConfig + selectedProtocol string + + handleCrash func(additionalHandlers ...func(interface{})) // overridable for testing +} + +// NewReader creates a WebSocket pipe that will copy the contents of r to a provided +// WebSocket connection. If ping is true, a zero length message will be sent to the client +// before the stream begins reading. +// +// The protocols parameter maps subprotocol names to StreamProtocols. The empty string +// subprotocol name is used if websocket.Config.Protocol is empty. +func NewReader(r io.Reader, ping bool, protocols map[string]ReaderProtocolConfig) *Reader { + return &Reader{ + r: r, + err: make(chan error), + ping: ping, + protocols: protocols, + handleCrash: runtime.HandleCrash, + } +} + +// SetIdleTimeout sets the interval for both reads and writes before timeout. If not specified, +// there is no timeout on the reader. 
+func (r *Reader) SetIdleTimeout(duration time.Duration) { + r.timeout = duration +} + +func (r *Reader) handshake(config *websocket.Config, req *http.Request) error { + supportedProtocols := make([]string, 0, len(r.protocols)) + for p := range r.protocols { + supportedProtocols = append(supportedProtocols, p) + } + return handshake(config, req, supportedProtocols) +} + +// Copy the reader to the response. The created WebSocket is closed after this +// method completes. +func (r *Reader) Copy(w http.ResponseWriter, req *http.Request) error { + go func() { + defer r.handleCrash() + websocket.Server{Handshake: r.handshake, Handler: r.handle}.ServeHTTP(w, req) + }() + return <-r.err +} + +// handle implements a WebSocket handler. +func (r *Reader) handle(ws *websocket.Conn) { + // Close the connection when the client requests it, or when we finish streaming, whichever happens first + closeConnOnce := &sync.Once{} + closeConn := func() { + closeConnOnce.Do(func() { + ws.Close() + }) + } + + negotiated := ws.Config().Protocol + r.selectedProtocol = negotiated[0] + defer close(r.err) + defer closeConn() + + go func() { + defer runtime.HandleCrash() + // This blocks until the connection is closed. + // Client should not send anything. + IgnoreReceives(ws, r.timeout) + // Once the client closes, we should also close + closeConn() + }() + + r.err <- messageCopy(ws, r.r, !r.protocols[r.selectedProtocol].Binary, r.ping, r.timeout) +} + +func resetTimeout(ws *websocket.Conn, timeout time.Duration) { + if timeout > 0 { + ws.SetDeadline(time.Now().Add(timeout)) + } +} + +func messageCopy(ws *websocket.Conn, r io.Reader, base64Encode, ping bool, timeout time.Duration) error { + buf := make([]byte, 2048) + if ping { + resetTimeout(ws, timeout) + if base64Encode { + if err := websocket.Message.Send(ws, ""); err != nil { + return err + } + } else { + if err := websocket.Message.Send(ws, []byte{}); err != nil { + return err + } + } + } + for { + resetTimeout(ws, timeout) + n, err := r.Read(buf) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + if n > 0 { + if base64Encode { + if err := websocket.Message.Send(ws, base64.StdEncoding.EncodeToString(buf[:n])); err != nil { + return err + } + } else { + if err := websocket.Message.Send(ws, buf[:n]); err != nil { + return err + } + } + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/warning/context.go b/vendor/k8s.io/apiserver/pkg/warning/context.go new file mode 100644 index 0000000000..1b9dd54dfc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/warning/context.go @@ -0,0 +1,59 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package warning + +import ( + "context" +) + +// The key type is unexported to prevent collisions +type key int + +const ( + // auditAnnotationsKey is the context key for the audit annotations. + warningRecorderKey key = iota +) + +// Recorder provides a method for recording warnings +type Recorder interface { + // AddWarning adds the specified warning to the response. 
+ // agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters. + // text must be valid UTF-8, and must not contain control characters. + AddWarning(agent, text string) +} + +// WithWarningRecorder returns a new context that wraps the provided context and contains the provided Recorder implementation. +// The returned context can be passed to AddWarning(). +func WithWarningRecorder(ctx context.Context, recorder Recorder) context.Context { + return context.WithValue(ctx, warningRecorderKey, recorder) +} +func warningRecorderFrom(ctx context.Context) (Recorder, bool) { + recorder, ok := ctx.Value(warningRecorderKey).(Recorder) + return recorder, ok +} + +// AddWarning records a warning for the specified agent and text to the Recorder added to the provided context using WithWarningRecorder(). +// If no Recorder exists in the provided context, this is a no-op. +// agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters. +// text must be valid UTF-8, and must not contain control characters. +func AddWarning(ctx context.Context, agent string, text string) { + recorder, ok := warningRecorderFrom(ctx) + if !ok { + return + } + recorder.AddWarning(agent, text) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go new file mode 100644 index 0000000000..a96d9bea30 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go @@ -0,0 +1,290 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffered + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/client-go/util/flowcontrol" +) + +// PluginName is the name reported in error metrics. +const PluginName = "buffered" + +// BatchConfig represents batching delegate audit backend configuration. +type BatchConfig struct { + // BufferSize defines a size of the buffering queue. + BufferSize int + // MaxBatchSize defines maximum size of a batch. + MaxBatchSize int + // MaxBatchWait indicates the maximum interval between two batches. + MaxBatchWait time.Duration + + // ThrottleEnable defines whether throttling will be applied to the batching process. + ThrottleEnable bool + // ThrottleQPS defines the allowed rate of batches per second sent to the delegate backend. + ThrottleQPS float32 + // ThrottleBurst defines the maximum number of requests sent to the delegate backend at the same moment in case + // the capacity defined by ThrottleQPS was not utilized. + ThrottleBurst int + + // Whether the delegate backend should be called asynchronously. + AsyncDelegate bool +} + +type bufferedBackend struct { + // The delegate backend that actually exports events. + delegateBackend audit.Backend + + // Channel to buffer events before sending to the delegate backend. 
+ buffer chan *auditinternal.Event + // Maximum number of events in a batch sent to the delegate backend. + maxBatchSize int + // Amount of time to wait after sending a batch to the delegate backend before sending another one. + // + // Receiving maxBatchSize events will always trigger sending a batch, regardless of the amount of time passed. + maxBatchWait time.Duration + + // Whether the delegate backend should be called asynchronously. + asyncDelegate bool + + // Channel to signal that the batching routine has processed all remaining events and exited. + // Once `shutdownCh` is closed no new events will be sent to the delegate backend. + shutdownCh chan struct{} + + // WaitGroup to control the concurrency of sending batches to the delegate backend. + // Worker routine calls Add before sending a batch and + // then spawns a routine that calls Done after batch was processed by the delegate backend. + // This WaitGroup is used to wait for all sending routines to finish before shutting down audit backend. + wg sync.WaitGroup + + // Limits the number of batches sent to the delegate backend per second. + throttle flowcontrol.RateLimiter +} + +var _ audit.Backend = &bufferedBackend{} + +// NewBackend returns a buffered audit backend that wraps delegate backend. +// Buffered backend automatically runs and shuts down the delegate backend. +func NewBackend(delegate audit.Backend, config BatchConfig) audit.Backend { + var throttle flowcontrol.RateLimiter + if config.ThrottleEnable { + throttle = flowcontrol.NewTokenBucketRateLimiter(config.ThrottleQPS, config.ThrottleBurst) + } + return &bufferedBackend{ + delegateBackend: delegate, + buffer: make(chan *auditinternal.Event, config.BufferSize), + maxBatchSize: config.MaxBatchSize, + maxBatchWait: config.MaxBatchWait, + asyncDelegate: config.AsyncDelegate, + shutdownCh: make(chan struct{}), + wg: sync.WaitGroup{}, + throttle: throttle, + } +} + +func (b *bufferedBackend) Run(stopCh <-chan struct{}) error { + go func() { + // Signal that the working routine has exited. + defer close(b.shutdownCh) + + b.processIncomingEvents(stopCh) + + // Handle the events that were received after the last buffer + // scraping and before this line. Since the buffer is closed, no new + // events will come through. + allEventsProcessed := false + timer := make(chan time.Time) + for !allEventsProcessed { + allEventsProcessed = func() bool { + // Recover from any panic in order to try to process all remaining events. + // Note, that in case of a panic, the return value will be false and + // the loop execution will continue. + defer runtime.HandleCrash() + + events := b.collectEvents(timer, wait.NeverStop) + b.processEvents(events) + return len(events) == 0 + }() + } + }() + return b.delegateBackend.Run(stopCh) +} + +// Shutdown blocks until stopCh passed to the Run method is closed and all +// events added prior to that moment are batched and sent to the delegate backend. +func (b *bufferedBackend) Shutdown() { + // Wait until the routine spawned in Run method exits. + <-b.shutdownCh + + // Wait until all sending routines exit. + // + // - When b.shutdownCh is closed, we know that the goroutine in Run has terminated. + // - This means that processIncomingEvents has terminated. + // - Which means that b.buffer is closed and cannot accept any new events anymore. + // - Because processEvents is called synchronously from the Run goroutine, the waitgroup has its final value. + // Hence wg.Wait will not miss any more outgoing batches. 
+ b.wg.Wait() + + b.delegateBackend.Shutdown() +} + +// processIncomingEvents runs a loop that collects events from the buffer. When +// b.stopCh is closed, processIncomingEvents stops and closes the buffer. +func (b *bufferedBackend) processIncomingEvents(stopCh <-chan struct{}) { + defer close(b.buffer) + + var ( + maxWaitChan <-chan time.Time + maxWaitTimer *time.Timer + ) + // Only use max wait batching if batching is enabled. + if b.maxBatchSize > 1 { + maxWaitTimer = time.NewTimer(b.maxBatchWait) + maxWaitChan = maxWaitTimer.C + defer maxWaitTimer.Stop() + } + + for { + func() { + // Recover from any panics caused by this function so a panic in the + // goroutine can't bring down the main routine. + defer runtime.HandleCrash() + + if b.maxBatchSize > 1 { + maxWaitTimer.Reset(b.maxBatchWait) + } + b.processEvents(b.collectEvents(maxWaitChan, stopCh)) + }() + + select { + case <-stopCh: + return + default: + } + } +} + +// collectEvents attempts to collect some number of events in a batch. +// +// The following things can cause collectEvents to stop and return the list +// of events: +// +// * Maximum number of events for a batch. +// * Timer has passed. +// * Buffer channel is closed and empty. +// * stopCh is closed. +func (b *bufferedBackend) collectEvents(timer <-chan time.Time, stopCh <-chan struct{}) []*auditinternal.Event { + var events []*auditinternal.Event + +L: + for i := 0; i < b.maxBatchSize; i++ { + select { + case ev, ok := <-b.buffer: + // Buffer channel was closed and no new events will follow. + if !ok { + break L + } + events = append(events, ev) + case <-timer: + // Timer has expired. Send currently accumulated batch. + break L + case <-stopCh: + // Backend has been stopped. Send currently accumulated batch. + break L + } + } + + return events +} + +// processEvents process the batch events in a goroutine using delegateBackend's ProcessEvents. +func (b *bufferedBackend) processEvents(events []*auditinternal.Event) { + if len(events) == 0 { + return + } + + // TODO(audit): Should control the number of active goroutines + // if one goroutine takes 5 seconds to finish, the number of goroutines can be 5 * defaultBatchThrottleQPS + if b.throttle != nil { + b.throttle.Accept() + } + + if b.asyncDelegate { + b.wg.Add(1) + go func() { + defer b.wg.Done() + defer runtime.HandleCrash() + + // Execute the real processing in a goroutine to keep it from blocking. + // This lets the batching routine continue draining the queue immediately. + b.delegateBackend.ProcessEvents(events...) + }() + } else { + func() { + defer runtime.HandleCrash() + + // Execute the real processing in a goroutine to keep it from blocking. + // This lets the batching routine continue draining the queue immediately. + b.delegateBackend.ProcessEvents(events...) + }() + } +} + +func (b *bufferedBackend) ProcessEvents(ev ...*auditinternal.Event) bool { + // The following mechanism is in place to support the situation when audit + // events are still coming after the backend was stopped. + var sendErr error + var evIndex int + + // If the delegateBackend was shutdown and the buffer channel was closed, an + // attempt to add an event to it will result in panic that we should + // recover from. + defer func() { + if err := recover(); err != nil { + sendErr = fmt.Errorf("audit backend shut down") + } + if sendErr != nil { + audit.HandlePluginError(PluginName, sendErr, ev[evIndex:]...) 
+ } + }() + + for i, e := range ev { + evIndex = i + // Per the audit.Backend interface these events are reused after being + // sent to the Sink. Deep copy and send the copy to the queue. + event := e.DeepCopy() + + select { + case b.buffer <- event: + default: + sendErr = fmt.Errorf("audit buffer queue blocked") + return true + } + } + return true +} + +func (b *bufferedBackend) String() string { + return fmt.Sprintf("%s<%s>", PluginName, b.delegateBackend) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go new file mode 100644 index 0000000000..a82599e426 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package buffered provides an implementation for the audit.Backend interface +// that batches incoming audit events and sends batches to the delegate audit.Backend. +package buffered // import "k8s.io/apiserver/plugin/pkg/audit/buffered" diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go new file mode 100644 index 0000000000..2ef2cc6ece --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "fmt" + "io" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" +) + +const ( + // FormatLegacy saves event in 1-line text format. + FormatLegacy = "legacy" + // FormatJson saves event in structured json format. + FormatJson = "json" + + // PluginName is the name of this plugin, to be used in help and logs. + PluginName = "log" +) + +// AllowedFormats are the formats known by log backend. 
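An aside for readers of this vendored file: the log backend above just encodes each audit event in the configured format and writes it to the supplied io.Writer. The following standalone sketch shows how it might be wired up; the output path, the explicit install.Install call, and the choice of auditv1.SchemeGroupVersion are illustrative assumptions, not part of this patch.

package main

import (
	"os"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/apis/audit/install"
	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
	"k8s.io/apiserver/pkg/audit"
	auditlog "k8s.io/apiserver/plugin/pkg/audit/log"
)

func main() {
	// The backend encodes events with audit.Codecs, so the audit API group
	// must be registered in audit.Scheme before the codec is used.
	install.Install(audit.Scheme)

	// Hypothetical destination; any io.Writer works.
	out, err := os.OpenFile("/tmp/audit.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600)
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// FormatJson emits one JSON document per event; FormatLegacy emits the
	// 1-line text form produced by audit.EventString.
	backend := auditlog.NewBackend(out, auditlog.FormatJson, auditv1.SchemeGroupVersion)
	backend.ProcessEvents(&auditinternal.Event{Level: auditinternal.LevelMetadata})
}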
+var AllowedFormats = []string{ + FormatLegacy, + FormatJson, +} + +type backend struct { + out io.Writer + format string + encoder runtime.Encoder +} + +var _ audit.Backend = &backend{} + +func NewBackend(out io.Writer, format string, groupVersion schema.GroupVersion) audit.Backend { + return &backend{ + out: out, + format: format, + encoder: audit.Codecs.LegacyCodec(groupVersion), + } +} + +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { + success := true + for _, ev := range events { + success = b.logEvent(ev) && success + } + return success +} + +func (b *backend) logEvent(ev *auditinternal.Event) bool { + line := "" + switch b.format { + case FormatLegacy: + line = audit.EventString(ev) + "\n" + case FormatJson: + bs, err := runtime.Encode(b.encoder, ev) + if err != nil { + audit.HandlePluginError(PluginName, err, ev) + return false + } + line = string(bs[:]) + default: + audit.HandlePluginError(PluginName, fmt.Errorf("log format %q is not in list of known formats (%s)", + b.format, strings.Join(AllowedFormats, ",")), ev) + return false + } + if _, err := fmt.Fprint(b.out, line); err != nil { + audit.HandlePluginError(PluginName, err, ev) + return false + } + return true +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return nil +} + +func (b *backend) Shutdown() { + // Nothing to do here. +} + +func (b *backend) String() string { + return PluginName +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go new file mode 100644 index 0000000000..9392ac3147 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package truncate provides an implementation for the audit.Backend interface +// that truncates audit events and sends them to the delegate audit.Backend. +package truncate // import "k8s.io/apiserver/plugin/pkg/audit/truncate" diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go new file mode 100644 index 0000000000..de1c2d9f74 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go @@ -0,0 +1,160 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package truncate + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" +) + +const ( + // PluginName is the name reported in error metrics. + PluginName = "truncate" + + // annotationKey defines the name of the annotation used to indicate truncation. + annotationKey = "audit.k8s.io/truncated" + // annotationValue defines the value of the annotation used to indicate truncation. + annotationValue = "true" +) + +// Config represents truncating backend configuration. +type Config struct { + // MaxEventSize defines max allowed size of the event. If the event is larger, + // truncating will be performed. + MaxEventSize int64 + + // MaxBatchSize defined max allowed size of the batch of events, passed to the backend. + // If the total size of the batch is larger than this number, batch will be split. Actual + // size of the serialized request might be slightly higher, on the order of hundreds of bytes. + MaxBatchSize int64 +} + +type backend struct { + // The delegate backend that actually exports events. + delegateBackend audit.Backend + + // Configuration used for truncation. + c Config + + // Encoder used to calculate audit event sizes. + e runtime.Encoder +} + +var _ audit.Backend = &backend{} + +// NewBackend returns a new truncating backend, using configuration passed in the parameters. +// Truncate backend automatically runs and shut downs the delegate backend. +func NewBackend(delegateBackend audit.Backend, config Config, groupVersion schema.GroupVersion) audit.Backend { + return &backend{ + delegateBackend: delegateBackend, + c: config, + e: audit.Codecs.LegacyCodec(groupVersion), + } +} + +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { + var errors []error + var impacted []*auditinternal.Event + var batch []*auditinternal.Event + var batchSize int64 + success := true + for _, event := range events { + size, err := b.calcSize(event) + // If event was correctly serialized, but the size is more than allowed + // and it makes sense to do trimming, i.e. there's a request and/or + // response present, try to strip away request and response. + if err == nil && size > b.c.MaxEventSize && event.Level.GreaterOrEqual(auditinternal.LevelRequest) { + event = truncate(event) + size, err = b.calcSize(event) + } + if err != nil { + errors = append(errors, err) + impacted = append(impacted, event) + continue + } + if size > b.c.MaxEventSize { + errors = append(errors, fmt.Errorf("event is too large even after truncating")) + impacted = append(impacted, event) + continue + } + + if len(batch) > 0 && batchSize+size > b.c.MaxBatchSize { + success = b.delegateBackend.ProcessEvents(batch...) && success + batch = []*auditinternal.Event{} + batchSize = 0 + } + + batchSize += size + batch = append(batch, event) + } + + if len(batch) > 0 { + success = b.delegateBackend.ProcessEvents(batch...) && success + } + + if len(impacted) > 0 { + audit.HandlePluginError(PluginName, utilerrors.NewAggregate(errors), impacted...) + } + return success +} + +// truncate removed request and response objects from the audit events, +// to try and keep at least metadata. +func truncate(e *auditinternal.Event) *auditinternal.Event { + // Make a shallow copy to avoid copying response/request objects. 
+ newEvent := &auditinternal.Event{} + *newEvent = *e + + newEvent.RequestObject = nil + newEvent.ResponseObject = nil + audit.LogAnnotation(newEvent, annotationKey, annotationValue) + return newEvent +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return b.delegateBackend.Run(stopCh) +} + +func (b *backend) Shutdown() { + b.delegateBackend.Shutdown() +} + +func (b *backend) calcSize(e *auditinternal.Event) (int64, error) { + s := &sizer{} + if err := b.e.Encode(e, s); err != nil { + return 0, err + } + return s.Size, nil +} + +func (b *backend) String() string { + return fmt.Sprintf("%s<%s>", PluginName, b.delegateBackend) +} + +type sizer struct { + Size int64 +} + +func (s *sizer) Write(p []byte) (n int, err error) { + s.Size += int64(len(p)) + return len(p), nil +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go new file mode 100644 index 0000000000..0a2aa70787 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go @@ -0,0 +1,139 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook implements the audit.Backend interface using HTTP webhooks. +package webhook + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/install" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/rest" + utiltrace "k8s.io/utils/trace" +) + +const ( + // PluginName is the name of this plugin, to be used in help and logs. + PluginName = "webhook" + + // DefaultInitialBackoffDelay is the default amount of time to wait before + // retrying sending audit events through a webhook. + DefaultInitialBackoffDelay = 10 * time.Second +) + +func init() { + install.Install(audit.Scheme) +} + +// retryOnError enforces the webhook client to retry requests +// on error regardless of its nature. +// The default implementation considers a very limited set of +// 'retriable' errors, assuming correct use of HTTP codes by +// external webhooks. +// That may easily lead to dropped audit events. In fact, there is +// hardly any error that could be a justified reason NOT to retry +// sending audit events if there is even a slight chance that the +// receiving service gets back to normal at some point. 
+func retryOnError(err error) bool { + if err != nil { + return true + } + return false +} + +func loadWebhook(configFile string, groupVersion schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*webhook.GenericWebhook, error) { + w, err := webhook.NewGenericWebhook(audit.Scheme, audit.Codecs, configFile, + []schema.GroupVersion{groupVersion}, retryBackoff, customDial) + if err != nil { + return nil, err + } + + w.ShouldRetry = retryOnError + return w, nil +} + +type backend struct { + w *webhook.GenericWebhook + name string +} + +// NewDynamicBackend returns an audit backend configured from a REST client that +// sends events over HTTP to an external service. +func NewDynamicBackend(rc *rest.RESTClient, retryBackoff wait.Backoff) audit.Backend { + return &backend{ + w: &webhook.GenericWebhook{ + RestClient: rc, + RetryBackoff: retryBackoff, + ShouldRetry: retryOnError, + }, + name: fmt.Sprintf("dynamic_%s", PluginName), + } +} + +// NewBackend returns an audit backend that sends events over HTTP to an external service. +func NewBackend(kubeConfigFile string, groupVersion schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (audit.Backend, error) { + w, err := loadWebhook(kubeConfigFile, groupVersion, retryBackoff, customDial) + if err != nil { + return nil, err + } + return &backend{w: w, name: PluginName}, nil +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return nil +} + +func (b *backend) Shutdown() { + // nothing to do here +} + +func (b *backend) ProcessEvents(ev ...*auditinternal.Event) bool { + if err := b.processEvents(ev...); err != nil { + audit.HandlePluginError(b.String(), err, ev...) + return false + } + return true +} + +func (b *backend) processEvents(ev ...*auditinternal.Event) error { + var list auditinternal.EventList + for _, e := range ev { + list.Items = append(list.Items, *e) + } + return b.w.WithExponentialBackoff(context.Background(), func() rest.Result { + trace := utiltrace.New("Call Audit Events webhook", + utiltrace.Field{"name", b.name}, + utiltrace.Field{"event-count", len(list.Items)}) + // Only log audit webhook traces that exceed a 25ms per object limit plus a 50ms + // request overhead allowance. The high per object limit used here is primarily to + // allow enough time for the serialization/deserialization of audit events, which + // contain nested request and response objects plus additional event fields. + defer trace.LogIfLong(time.Duration(50+25*len(list.Items)) * time.Millisecond) + return b.w.RestClient.Post().Body(&list).Do(context.TODO()) + }).Error() +} + +func (b *backend) String() string { + return b.name +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go new file mode 100644 index 0000000000..d4bf1b45a9 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -0,0 +1,257 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook implements the authenticator.Token interface using HTTP webhooks. +package webhook + +import ( + "context" + "errors" + "fmt" + "time" + + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/kubernetes/scheme" + authenticationv1client "k8s.io/client-go/kubernetes/typed/authentication/v1" + "k8s.io/klog/v2" +) + +// DefaultRetryBackoff returns the default backoff parameters for webhook retry. +func DefaultRetryBackoff() *wait.Backoff { + backoff := webhook.DefaultRetryBackoffWithInitialDelay(500 * time.Millisecond) + return &backoff +} + +// Ensure WebhookTokenAuthenticator implements the authenticator.Token interface. +var _ authenticator.Token = (*WebhookTokenAuthenticator)(nil) + +type tokenReviewer interface { + Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, error) +} + +type WebhookTokenAuthenticator struct { + tokenReview tokenReviewer + retryBackoff wait.Backoff + implicitAuds authenticator.Audiences +} + +// NewFromInterface creates a webhook authenticator using the given tokenReview +// client. It is recommend to wrap this authenticator with the token cache +// authenticator implemented in +// k8s.io/apiserver/pkg/authentication/token/cache. +func NewFromInterface(tokenReview authenticationv1client.TokenReviewInterface, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff) (*WebhookTokenAuthenticator, error) { + return newWithBackoff(tokenReview, retryBackoff, implicitAuds) +} + +// New creates a new WebhookTokenAuthenticator from the provided kubeconfig +// file. It is recommend to wrap this authenticator with the token cache +// authenticator implemented in +// k8s.io/apiserver/pkg/authentication/token/cache. +func New(kubeConfigFile string, version string, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*WebhookTokenAuthenticator, error) { + tokenReview, err := tokenReviewInterfaceFromKubeconfig(kubeConfigFile, version, retryBackoff, customDial) + if err != nil { + return nil, err + } + return newWithBackoff(tokenReview, retryBackoff, implicitAuds) +} + +// newWithBackoff allows tests to skip the sleep. +func newWithBackoff(tokenReview tokenReviewer, retryBackoff wait.Backoff, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { + return &WebhookTokenAuthenticator{tokenReview, retryBackoff, implicitAuds}, nil +} + +// AuthenticateToken implements the authenticator.Token interface. +func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + // We take implicit audiences of the API server at WebhookTokenAuthenticator + // construction time. The outline of how we validate audience here is: + // + // * if the ctx is not audience limited, don't do any audience validation. 
+ // * if ctx is audience-limited, add the audiences to the tokenreview spec + // * if the tokenreview returns with audiences in the status that intersect + // with the audiences in the ctx, copy into the response and return success + // * if the tokenreview returns without an audience in the status, ensure + // the ctx audiences intersect with the implicit audiences, and set the + // intersection in the response. + // * otherwise return unauthenticated. + wantAuds, checkAuds := authenticator.AudiencesFrom(ctx) + r := &authenticationv1.TokenReview{ + Spec: authenticationv1.TokenReviewSpec{ + Token: token, + Audiences: wantAuds, + }, + } + var ( + result *authenticationv1.TokenReview + err error + auds authenticator.Audiences + ) + webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error { + result, err = w.tokenReview.Create(ctx, r, metav1.CreateOptions{}) + return err + }, webhook.DefaultShouldRetry) + if err != nil { + // An error here indicates bad configuration or an outage. Log for debugging. + klog.Errorf("Failed to make webhook authenticator request: %v", err) + return nil, false, err + } + + if checkAuds { + gotAuds := w.implicitAuds + if len(result.Status.Audiences) > 0 { + gotAuds = result.Status.Audiences + } + auds = wantAuds.Intersect(gotAuds) + if len(auds) == 0 { + return nil, false, nil + } + } + + r.Status = result.Status + if !r.Status.Authenticated { + var err error + if len(r.Status.Error) != 0 { + err = errors.New(r.Status.Error) + } + return nil, false, err + } + + var extra map[string][]string + if r.Status.User.Extra != nil { + extra = map[string][]string{} + for k, v := range r.Status.User.Extra { + extra[k] = v + } + } + + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: r.Status.User.Username, + UID: r.Status.User.UID, + Groups: r.Status.User.Groups, + Extra: extra, + }, + Audiences: auds, + }, true, nil +} + +// tokenReviewInterfaceFromKubeconfig builds a client from the specified kubeconfig file, +// and returns a TokenReviewInterface that uses that client. Note that the client submits TokenReview +// requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted. 
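For orientation, a sketch of how this authenticator is typically constructed, following the recommendation in the constructor comments to wrap it with the caching authenticator from k8s.io/apiserver/pkg/authentication/token/cache. The kubeconfig path and the TTLs are illustrative assumptions, not values defined by this package.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/authentication/token/cache"
	webhooktoken "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook"
)

func main() {
	// Build the webhook authenticator against the v1 TokenReview API.
	// The kubeconfig path is a placeholder; nil means no custom dialer.
	wh, err := webhooktoken.New(
		"/etc/kubernetes/authn-webhook.kubeconfig", // hypothetical path
		"v1", // authenticationv1.SchemeGroupVersion.Version
		nil,  // no implicit audiences
		*webhooktoken.DefaultRetryBackoff(),
		nil,
	)
	if err != nil {
		panic(err)
	}

	// Wrap with the token cache, as the package comments recommend, so that
	// repeated requests carrying the same bearer token do not hit the webhook.
	authn := cache.New(wh, false, 2*time.Minute, 30*time.Second)

	resp, ok, err := authn.AuthenticateToken(context.Background(), "some-opaque-token")
	fmt.Println(resp, ok, err)
}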
+func tokenReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (tokenReviewer, error) { + localScheme := runtime.NewScheme() + if err := scheme.AddToScheme(localScheme); err != nil { + return nil, err + } + + switch version { + case authenticationv1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authenticationv1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) + if err != nil { + return nil, err + } + return &tokenReviewV1Client{gw}, nil + + case authenticationv1beta1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authenticationv1beta1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) + if err != nil { + return nil, err + } + return &tokenReviewV1beta1Client{gw}, nil + + default: + return nil, fmt.Errorf( + "unsupported authentication webhook version %q, supported versions are %q, %q", + version, + authenticationv1.SchemeGroupVersion.Version, + authenticationv1beta1.SchemeGroupVersion.Version, + ) + } + +} + +type tokenReviewV1Client struct { + w *webhook.GenericWebhook +} + +func (t *tokenReviewV1Client) Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, error) { + result := &authenticationv1.TokenReview{} + err := t.w.RestClient.Post().Body(review).Do(ctx).Into(result) + return result, err +} + +type tokenReviewV1beta1Client struct { + w *webhook.GenericWebhook +} + +func (t *tokenReviewV1beta1Client) Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, error) { + v1beta1Review := &authenticationv1beta1.TokenReview{Spec: v1SpecToV1beta1Spec(&review.Spec)} + v1beta1Result := &authenticationv1beta1.TokenReview{} + err := t.w.RestClient.Post().Body(v1beta1Review).Do(ctx).Into(v1beta1Result) + if err != nil { + return nil, err + } + review.Status = v1beta1StatusToV1Status(&v1beta1Result.Status) + return review, nil +} + +func v1SpecToV1beta1Spec(in *authenticationv1.TokenReviewSpec) authenticationv1beta1.TokenReviewSpec { + return authenticationv1beta1.TokenReviewSpec{ + Token: in.Token, + Audiences: in.Audiences, + } +} + +func v1beta1StatusToV1Status(in *authenticationv1beta1.TokenReviewStatus) authenticationv1.TokenReviewStatus { + return authenticationv1.TokenReviewStatus{ + Authenticated: in.Authenticated, + User: v1beta1UserToV1User(in.User), + Audiences: in.Audiences, + Error: in.Error, + } +} + +func v1beta1UserToV1User(u authenticationv1beta1.UserInfo) authenticationv1.UserInfo { + var extra map[string]authenticationv1.ExtraValue + if u.Extra != nil { + extra = make(map[string]authenticationv1.ExtraValue, len(u.Extra)) + for k, v := range u.Extra { + extra[k] = authenticationv1.ExtraValue(v) + } + } + return authenticationv1.UserInfo{ + Username: u.Username, + UID: u.UID, + Groups: u.Groups, + Extra: extra, + } +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh new file mode 100644 index 0000000000..a66f8f381e --- /dev/null +++ 
b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# gencerts.sh generates the certificates for the webhook authz plugin tests. +# +# It is not expected to be run often (there is no go generate rule), and mainly +# exists for documentation purposes. + +cat > server.conf << EOF +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +IP.1 = 127.0.0.1 +EOF + +cat > client.conf << EOF +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth +EOF + +# Create a certificate authority +openssl genrsa -out caKey.pem 2048 +openssl req -x509 -new -nodes -key caKey.pem -days 100000 -out caCert.pem -subj "/CN=webhook_authz_ca" + +# Create a second certificate authority +openssl genrsa -out badCAKey.pem 2048 +openssl req -x509 -new -nodes -key badCAKey.pem -days 100000 -out badCACert.pem -subj "/CN=webhook_authz_ca" + +# Create a server certiticate +openssl genrsa -out serverKey.pem 2048 +openssl req -new -key serverKey.pem -out server.csr -subj "/CN=webhook_authz_server" -config server.conf +openssl x509 -req -in server.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCert.pem -days 100000 -extensions v3_req -extfile server.conf + +# Create a client certiticate +openssl genrsa -out clientKey.pem 2048 +openssl req -new -key clientKey.pem -out client.csr -subj "/CN=webhook_authz_client" -config client.conf +openssl x509 -req -in client.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out clientCert.pem -days 100000 -extensions v3_req -extfile client.conf + +outfile=certs_test.go + +cat > $outfile << EOF +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated using openssl by the gencerts.sh script +// and holds raw certificates for the webhook tests. 
+ +package webhook +EOF + +for file in caKey caCert badCAKey badCACert serverKey serverCert clientKey clientCert; do + data=$(cat ${file}.pem) + echo "" >> $outfile + echo "var $file = []byte(\`$data\`)" >> $outfile +done + +# Clean up after we're done. +rm ./*.pem +rm ./*.csr +rm ./*.srl +rm ./*.conf diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go new file mode 100644 index 0000000000..5c9f28ad40 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -0,0 +1,386 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook implements the authorizer.Authorizer interface using HTTP webhooks. +package webhook + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "k8s.io/klog/v2" + + authorizationv1 "k8s.io/api/authorization/v1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/cache" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/kubernetes/scheme" + authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" +) + +const ( + // The maximum length of requester-controlled attributes to allow caching. + maxControlledAttrCacheSize = 10000 +) + +// DefaultRetryBackoff returns the default backoff parameters for webhook retry. +func DefaultRetryBackoff() *wait.Backoff { + backoff := webhook.DefaultRetryBackoffWithInitialDelay(500 * time.Millisecond) + return &backoff +} + +// Ensure Webhook implements the authorizer.Authorizer interface. +var _ authorizer.Authorizer = (*WebhookAuthorizer)(nil) + +type subjectAccessReviewer interface { + Create(context.Context, *authorizationv1.SubjectAccessReview, metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) +} + +type WebhookAuthorizer struct { + subjectAccessReview subjectAccessReviewer + responseCache *cache.LRUExpireCache + authorizedTTL time.Duration + unauthorizedTTL time.Duration + retryBackoff wait.Backoff + decisionOnError authorizer.Decision +} + +// NewFromInterface creates a WebhookAuthorizer using the given subjectAccessReview client +func NewFromInterface(subjectAccessReview authorizationv1client.SubjectAccessReviewInterface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff) (*WebhookAuthorizer, error) { + return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff) +} + +// New creates a new WebhookAuthorizer from the provided kubeconfig file. +// The config's cluster field is used to refer to the remote service, user refers to the returned authorizer. +// +// # clusters refers to the remote service. 
+// clusters: +// - name: name-of-remote-authz-service +// cluster: +// certificate-authority: /path/to/ca.pem # CA for verifying the remote service. +// server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'. +// +// # users refers to the API server's webhook configuration. +// users: +// - name: name-of-api-server +// user: +// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use +// client-key: /path/to/key.pem # key matching the cert +// +// For additional HTTP configuration, refer to the kubeconfig documentation +// https://kubernetes.io/docs/user-guide/kubeconfig-file/. +func New(kubeConfigFile string, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*WebhookAuthorizer, error) { + subjectAccessReview, err := subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile, version, retryBackoff, customDial) + if err != nil { + return nil, err + } + return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff) +} + +// newWithBackoff allows tests to skip the sleep. +func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff) (*WebhookAuthorizer, error) { + return &WebhookAuthorizer{ + subjectAccessReview: subjectAccessReview, + responseCache: cache.NewLRUExpireCache(8192), + authorizedTTL: authorizedTTL, + unauthorizedTTL: unauthorizedTTL, + retryBackoff: retryBackoff, + decisionOnError: authorizer.DecisionNoOpinion, + }, nil +} + +// Authorize makes a REST request to the remote service describing the attempted action as a JSON +// serialized api.authorization.v1beta1.SubjectAccessReview object. An example request body is +// provided below. +// +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "spec": { +// "resourceAttributes": { +// "namespace": "kittensandponies", +// "verb": "GET", +// "group": "group3", +// "resource": "pods" +// }, +// "user": "jane", +// "group": [ +// "group1", +// "group2" +// ] +// } +// } +// +// The remote service is expected to fill the SubjectAccessReviewStatus field to either allow or +// disallow access. A permissive response would return: +// +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "status": { +// "allowed": true +// } +// } +// +// To disallow access, the remote service would return: +// +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "status": { +// "allowed": false, +// "reason": "user does not have read access to the namespace" +// } +// } +// +// TODO(mikedanese): We should eventually support failing closed when we +// encounter an error. We are failing open now to preserve backwards compatible +// behavior. 
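A short usage sketch for the authorizer described above: build it from a kubeconfig that points at the remote SubjectAccessReview service, then ask it about a concrete request. The kubeconfig path, TTLs, and request attributes are illustrative assumptions; the attributes mirror the example request body shown in the Authorize comment.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	webhookauthz "k8s.io/apiserver/plugin/pkg/authorizer/webhook"
)

func main() {
	// Hypothetical kubeconfig describing the remote authorizer; "v1" selects
	// the authorization.k8s.io/v1 SubjectAccessReview API.
	authz, err := webhookauthz.New(
		"/etc/kubernetes/authz-webhook.kubeconfig",
		"v1",
		5*time.Minute,  // authorizedTTL: how long to cache "allow" answers
		30*time.Second, // unauthorizedTTL: how long to cache other answers
		*webhookauthz.DefaultRetryBackoff(),
		nil, // no custom dialer
	)
	if err != nil {
		panic(err)
	}

	// Attributes for "can user jane get pods in kittensandponies?".
	attrs := authorizer.AttributesRecord{
		User:            &user.DefaultInfo{Name: "jane", Groups: []string{"group1", "group2"}},
		Verb:            "get",
		Namespace:       "kittensandponies",
		APIGroup:        "group3",
		Resource:        "pods",
		ResourceRequest: true,
	}

	decision, reason, err := authz.Authorize(context.Background(), attrs)
	fmt.Println(decision == authorizer.DecisionAllow, reason, err)
}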
+func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { + r := &authorizationv1.SubjectAccessReview{} + if user := attr.GetUser(); user != nil { + r.Spec = authorizationv1.SubjectAccessReviewSpec{ + User: user.GetName(), + UID: user.GetUID(), + Groups: user.GetGroups(), + Extra: convertToSARExtra(user.GetExtra()), + } + } + + if attr.IsResourceRequest() { + r.Spec.ResourceAttributes = &authorizationv1.ResourceAttributes{ + Namespace: attr.GetNamespace(), + Verb: attr.GetVerb(), + Group: attr.GetAPIGroup(), + Version: attr.GetAPIVersion(), + Resource: attr.GetResource(), + Subresource: attr.GetSubresource(), + Name: attr.GetName(), + } + } else { + r.Spec.NonResourceAttributes = &authorizationv1.NonResourceAttributes{ + Path: attr.GetPath(), + Verb: attr.GetVerb(), + } + } + key, err := json.Marshal(r.Spec) + if err != nil { + return w.decisionOnError, "", err + } + if entry, ok := w.responseCache.Get(string(key)); ok { + r.Status = entry.(authorizationv1.SubjectAccessReviewStatus) + } else { + var ( + result *authorizationv1.SubjectAccessReview + err error + ) + webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error { + result, err = w.subjectAccessReview.Create(ctx, r, metav1.CreateOptions{}) + return err + }, webhook.DefaultShouldRetry) + if err != nil { + // An error here indicates bad configuration or an outage. Log for debugging. + klog.Errorf("Failed to make webhook authorizer request: %v", err) + return w.decisionOnError, "", err + } + r.Status = result.Status + if shouldCache(attr) { + if r.Status.Allowed { + w.responseCache.Add(string(key), r.Status, w.authorizedTTL) + } else { + w.responseCache.Add(string(key), r.Status, w.unauthorizedTTL) + } + } + } + switch { + case r.Status.Denied && r.Status.Allowed: + return authorizer.DecisionDeny, r.Status.Reason, fmt.Errorf("webhook subject access review returned both allow and deny response") + case r.Status.Denied: + return authorizer.DecisionDeny, r.Status.Reason, nil + case r.Status.Allowed: + return authorizer.DecisionAllow, r.Status.Reason, nil + default: + return authorizer.DecisionNoOpinion, r.Status.Reason, nil + } + +} + +//TODO: need to finish the method to get the rules when using webhook mode +func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { + var ( + resourceRules []authorizer.ResourceRuleInfo + nonResourceRules []authorizer.NonResourceRuleInfo + ) + incomplete := true + return resourceRules, nonResourceRules, incomplete, fmt.Errorf("webhook authorizer does not support user rule resolution") +} + +func convertToSARExtra(extra map[string][]string) map[string]authorizationv1.ExtraValue { + if extra == nil { + return nil + } + ret := map[string]authorizationv1.ExtraValue{} + for k, v := range extra { + ret[k] = authorizationv1.ExtraValue(v) + } + + return ret +} + +// subjectAccessReviewInterfaceFromKubeconfig builds a client from the specified kubeconfig file, +// and returns a SubjectAccessReviewInterface that uses that client. Note that the client submits SubjectAccessReview +// requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted. 
+func subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (subjectAccessReviewer, error) { + localScheme := runtime.NewScheme() + if err := scheme.AddToScheme(localScheme); err != nil { + return nil, err + } + + switch version { + case authorizationv1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authorizationv1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) + if err != nil { + return nil, err + } + return &subjectAccessReviewV1Client{gw}, nil + + case authorizationv1beta1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authorizationv1beta1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) + if err != nil { + return nil, err + } + return &subjectAccessReviewV1beta1Client{gw}, nil + + default: + return nil, fmt.Errorf( + "unsupported webhook authorizer version %q, supported versions are %q, %q", + version, + authorizationv1.SchemeGroupVersion.Version, + authorizationv1beta1.SchemeGroupVersion.Version, + ) + } +} + +type subjectAccessReviewV1Client struct { + w *webhook.GenericWebhook +} + +func (t *subjectAccessReviewV1Client) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, _ metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) { + result := &authorizationv1.SubjectAccessReview{} + err := t.w.RestClient.Post().Body(subjectAccessReview).Do(ctx).Into(result) + return result, err +} + +type subjectAccessReviewV1beta1Client struct { + w *webhook.GenericWebhook +} + +func (t *subjectAccessReviewV1beta1Client) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, _ metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) { + v1beta1Review := &authorizationv1beta1.SubjectAccessReview{Spec: v1SpecToV1beta1Spec(&subjectAccessReview.Spec)} + v1beta1Result := &authorizationv1beta1.SubjectAccessReview{} + err := t.w.RestClient.Post().Body(v1beta1Review).Do(ctx).Into(v1beta1Result) + if err == nil { + subjectAccessReview.Status = v1beta1StatusToV1Status(&v1beta1Result.Status) + } + return subjectAccessReview, err +} + +// shouldCache determines whether it is safe to cache the given request attributes. If the +// requester-controlled attributes are too large, this may be a DoS attempt, so we skip the cache. 
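The response cache set up in newWithBackoff is an ordinary LRU with per-entry TTLs: allowed answers are kept for authorizedTTL, all other answers for unauthorizedTTL, and shouldCache below refuses to cache requests whose requester-controlled attributes are suspiciously large. A tiny self-contained sketch of that TTL-split pattern follows; the keys and durations are made up for illustration.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	responseCache := cache.NewLRUExpireCache(8192)

	authorizedTTL := 5 * time.Minute
	unauthorizedTTL := 30 * time.Second

	// Cache an "allow" decision longer than a "deny" / "no opinion" decision.
	responseCache.Add("jane/get/pods", true, authorizedTTL)
	responseCache.Add("mallory/delete/nodes", false, unauthorizedTTL)

	if v, ok := responseCache.Get("jane/get/pods"); ok {
		fmt.Println("cached decision:", v.(bool))
	}
}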
+func shouldCache(attr authorizer.Attributes) bool { + controlledAttrSize := int64(len(attr.GetNamespace())) + + int64(len(attr.GetVerb())) + + int64(len(attr.GetAPIGroup())) + + int64(len(attr.GetAPIVersion())) + + int64(len(attr.GetResource())) + + int64(len(attr.GetSubresource())) + + int64(len(attr.GetName())) + + int64(len(attr.GetPath())) + return controlledAttrSize < maxControlledAttrCacheSize +} + +func v1beta1StatusToV1Status(in *authorizationv1beta1.SubjectAccessReviewStatus) authorizationv1.SubjectAccessReviewStatus { + return authorizationv1.SubjectAccessReviewStatus{ + Allowed: in.Allowed, + Denied: in.Denied, + Reason: in.Reason, + EvaluationError: in.EvaluationError, + } +} + +func v1SpecToV1beta1Spec(in *authorizationv1.SubjectAccessReviewSpec) authorizationv1beta1.SubjectAccessReviewSpec { + return authorizationv1beta1.SubjectAccessReviewSpec{ + ResourceAttributes: v1ResourceAttributesToV1beta1ResourceAttributes(in.ResourceAttributes), + NonResourceAttributes: v1NonResourceAttributesToV1beta1NonResourceAttributes(in.NonResourceAttributes), + User: in.User, + Groups: in.Groups, + Extra: v1ExtraToV1beta1Extra(in.Extra), + UID: in.UID, + } +} + +func v1ResourceAttributesToV1beta1ResourceAttributes(in *authorizationv1.ResourceAttributes) *authorizationv1beta1.ResourceAttributes { + if in == nil { + return nil + } + return &authorizationv1beta1.ResourceAttributes{ + Namespace: in.Namespace, + Verb: in.Verb, + Group: in.Group, + Version: in.Version, + Resource: in.Resource, + Subresource: in.Subresource, + Name: in.Name, + } +} + +func v1NonResourceAttributesToV1beta1NonResourceAttributes(in *authorizationv1.NonResourceAttributes) *authorizationv1beta1.NonResourceAttributes { + if in == nil { + return nil + } + return &authorizationv1beta1.NonResourceAttributes{ + Path: in.Path, + Verb: in.Verb, + } +} + +func v1ExtraToV1beta1Extra(in map[string]authorizationv1.ExtraValue) map[string]authorizationv1beta1.ExtraValue { + if in == nil { + return nil + } + ret := make(map[string]authorizationv1beta1.ExtraValue, len(in)) + for k, v := range in { + ret[k] = authorizationv1beta1.ExtraValue(v) + } + return ret +} diff --git a/vendor/k8s.io/client-go/tools/events/OWNERS b/vendor/k8s.io/client-go/tools/events/OWNERS new file mode 100644 index 0000000000..fbd0a6a013 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- yastij +- wojtek-t +reviewers: +- yastij +- wojtek-t diff --git a/vendor/k8s.io/client-go/tools/events/doc.go b/vendor/k8s.io/client-go/tools/events/doc.go new file mode 100644 index 0000000000..795582b028 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package events has all client logic for recording and reporting +// "k8s.io/api/events/v1beta1".Event events. 
+package events // import "k8s.io/client-go/tools/events" diff --git a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go new file mode 100644 index 0000000000..bde888ee98 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go @@ -0,0 +1,384 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedv1core "k8s.io/client-go/kubernetes/typed/core/v1" + typedeventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/record/util" + "k8s.io/klog/v2" +) + +const ( + maxTriesPerEvent = 12 + finishTime = 6 * time.Minute + refreshTime = 30 * time.Minute + maxQueuedEvents = 1000 +) + +var defaultSleepDuration = 10 * time.Second + +// TODO: validate impact of copying and investigate hashing +type eventKey struct { + action string + reason string + reportingController string + regarding corev1.ObjectReference + related corev1.ObjectReference +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + mu sync.Mutex + eventCache map[eventKey]*eventsv1.Event + sleepDuration time.Duration + sink EventSink +} + +// EventSinkImpl wraps EventsV1Interface to implement EventSink. +// TODO: this makes it easier for testing purpose and masks the logic of performing API calls. +// Note that rollbacking to raw clientset should also be transparent. +type EventSinkImpl struct { + Interface typedeventsv1.EventsV1Interface +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (e *EventSinkImpl) Create(event *eventsv1.Event) (*eventsv1.Event, error) { + if event.Namespace == "" { + return nil, fmt.Errorf("can't create an event with empty namespace") + } + return e.Interface.Events(event.Namespace).Create(context.TODO(), event, metav1.CreateOptions{}) +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. 
+func (e *EventSinkImpl) Update(event *eventsv1.Event) (*eventsv1.Event, error) { + if event.Namespace == "" { + return nil, fmt.Errorf("can't update an event with empty namespace") + } + return e.Interface.Events(event.Namespace).Update(context.TODO(), event, metav1.UpdateOptions{}) +} + +// Patch applies the patch and returns the patched event, and an error, if there is any. +func (e *EventSinkImpl) Patch(event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { + if event.Namespace == "" { + return nil, fmt.Errorf("can't patch an event with empty namespace") + } + return e.Interface.Events(event.Namespace).Patch(context.TODO(), event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) +} + +// NewBroadcaster Creates a new event broadcaster. +func NewBroadcaster(sink EventSink) EventBroadcaster { + return newBroadcaster(sink, defaultSleepDuration, map[eventKey]*eventsv1.Event{}) +} + +// NewBroadcasterForTest Creates a new event broadcaster for test purposes. +func newBroadcaster(sink EventSink, sleepDuration time.Duration, eventCache map[eventKey]*eventsv1.Event) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + eventCache: eventCache, + sleepDuration: sleepDuration, + sink: sink, + } +} + +func (e *eventBroadcasterImpl) Shutdown() { + e.Broadcaster.Shutdown() +} + +// refreshExistingEventSeries refresh events TTL +func (e *eventBroadcasterImpl) refreshExistingEventSeries() { + // TODO: Investigate whether lock contention won't be a problem + e.mu.Lock() + defer e.mu.Unlock() + for isomorphicKey, event := range e.eventCache { + if event.Series != nil { + if recordedEvent, retry := recordEvent(e.sink, event); !retry { + if recordedEvent != nil { + e.eventCache[isomorphicKey] = recordedEvent + } + } + } + } +} + +// finishSeries checks if a series has ended and either: +// - write final count to the apiserver +// - delete a singleton event (i.e. series field is nil) from the cache +func (e *eventBroadcasterImpl) finishSeries() { + // TODO: Investigate whether lock contention won't be a problem + e.mu.Lock() + defer e.mu.Unlock() + for isomorphicKey, event := range e.eventCache { + eventSerie := event.Series + if eventSerie != nil { + if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) { + if _, retry := recordEvent(e.sink, event); !retry { + delete(e.eventCache, isomorphicKey) + } + } + } else if event.EventTime.Time.Before(time.Now().Add(-finishTime)) { + delete(e.eventCache, isomorphicKey) + } + } +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder { + hostname, _ := os.Hostname() + reportingInstance := reportingController + "-" + hostname + return &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}} +} + +func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.Clock) { + // Make a copy before modification, because there could be multiple listeners. 
+ eventCopy := event.DeepCopy() + go func() { + evToRecord := func() *eventsv1.Event { + e.mu.Lock() + defer e.mu.Unlock() + eventKey := getKey(eventCopy) + isomorphicEvent, isIsomorphic := e.eventCache[eventKey] + if isIsomorphic { + if isomorphicEvent.Series != nil { + isomorphicEvent.Series.Count++ + isomorphicEvent.Series.LastObservedTime = metav1.MicroTime{Time: clock.Now()} + return nil + } + isomorphicEvent.Series = &eventsv1.EventSeries{ + Count: 1, + LastObservedTime: metav1.MicroTime{Time: clock.Now()}, + } + return isomorphicEvent + } + e.eventCache[eventKey] = eventCopy + return eventCopy + }() + if evToRecord != nil { + recordedEvent := e.attemptRecording(evToRecord) + if recordedEvent != nil { + recordedEventKey := getKey(recordedEvent) + e.mu.Lock() + defer e.mu.Unlock() + e.eventCache[recordedEventKey] = recordedEvent + } + } + }() +} + +func (e *eventBroadcasterImpl) attemptRecording(event *eventsv1.Event) *eventsv1.Event { + tries := 0 + for { + if recordedEvent, retry := recordEvent(e.sink, event); !retry { + return recordedEvent + } + tries++ + if tries >= maxTriesPerEvent { + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + return nil + } + // Randomize sleep so that various clients won't all be + // synced up if the master goes down. + time.Sleep(wait.Jitter(e.sleepDuration, 0.25)) + } +} + +func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { + var newEvent *eventsv1.Event + var err error + isEventSeries := event.Series != nil + if isEventSeries { + patch, patchBytesErr := createPatchBytesForSeries(event) + if patchBytesErr != nil { + klog.Errorf("Unable to calculate diff, no merge is possible: %v", patchBytesErr) + return nil, false + } + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + return newEvent, false + } + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return nil, false + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return nil, false + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. 
+ } + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return nil, true +} + +func createPatchBytesForSeries(event *eventsv1.Event) ([]byte, error) { + oldEvent := event.DeepCopy() + oldEvent.Series = nil + oldData, err := json.Marshal(oldEvent) + if err != nil { + return nil, err + } + newData, err := json.Marshal(event) + if err != nil { + return nil, err + } + return strategicpatch.CreateTwoWayMergePatch(oldData, newData, eventsv1.Event{}) +} + +func getKey(event *eventsv1.Event) eventKey { + key := eventKey{ + action: event.Action, + reason: event.Reason, + reportingController: event.ReportingController, + regarding: event.Regarding, + } + if event.Related != nil { + key.related = *event.Related + } + return key +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value is used to stop recording +func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) func() { + watcher := e.Watch() + go func() { + defer utilruntime.HandleCrash() + for { + watchEvent, ok := <-watcher.ResultChan() + if !ok { + return + } + eventHandler(watchEvent.Object) + } + }() + return watcher.Stop +} + +func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) { + eventHandler := func(obj runtime.Object) { + event, ok := obj.(*eventsv1.Event) + if !ok { + klog.Errorf("unexpected type, expected eventsv1.Event") + return + } + e.recordToSink(event, clock.RealClock{}) + } + stopWatcher := e.StartEventWatcher(eventHandler) + go func() { + <-stopCh + stopWatcher() + }() +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) { + go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh) + go wait.Until(e.finishSeries, finishTime, stopCh) + e.startRecordingEvents(stopCh) +} + +type eventBroadcasterAdapterImpl struct { + coreClient typedv1core.EventsGetter + coreBroadcaster record.EventBroadcaster + eventsv1Client typedeventsv1.EventsV1Interface + eventsv1Broadcaster EventBroadcaster +} + +// NewEventBroadcasterAdapter creates a wrapper around new and legacy broadcasters to simplify +// migration of individual components to the new Event API. +func NewEventBroadcasterAdapter(client clientset.Interface) EventBroadcasterAdapter { + eventClient := &eventBroadcasterAdapterImpl{} + if _, err := client.Discovery().ServerResourcesForGroupVersion(eventsv1.SchemeGroupVersion.String()); err == nil { + eventClient.eventsv1Client = client.EventsV1() + eventClient.eventsv1Broadcaster = NewBroadcaster(&EventSinkImpl{Interface: eventClient.eventsv1Client}) + } + // Even though there can soon exist cases when coreBroadcaster won't really be needed, + // we create it unconditionally because its overhead is minor and will simplify using usage + // patterns of this library in all components. + eventClient.coreClient = client.CoreV1() + eventClient.coreBroadcaster = record.NewBroadcaster() + return eventClient +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. 
+func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{}) { + if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { + e.eventsv1Broadcaster.StartRecordingToSink(stopCh) + } + if e.coreBroadcaster != nil && e.coreClient != nil { + e.coreBroadcaster.StartRecordingToSink(&typedv1core.EventSinkImpl{Interface: e.coreClient.Events("")}) + } +} + +func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorder { + if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { + return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name) + } + return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name)) +} + +func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorder { + return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name}) +} + +func (e *eventBroadcasterAdapterImpl) Shutdown() { + if e.coreBroadcaster != nil { + e.coreBroadcaster.Shutdown() + } + if e.eventsv1Broadcaster != nil { + e.eventsv1Broadcaster.Shutdown() + } +} diff --git a/vendor/k8s.io/client-go/tools/events/event_recorder.go b/vendor/k8s.io/client-go/tools/events/event_recorder.go new file mode 100644 index 0000000000..2837cc1603 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/event_recorder.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/record/util" + "k8s.io/client-go/tools/reference" + "k8s.io/klog/v2" +) + +type recorderImpl struct { + scheme *runtime.Scheme + reportingController string + reportingInstance string + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + timestamp := metav1.MicroTime{time.Now()} + message := fmt.Sprintf(note, args...) + refRegarding, err := reference.GetReference(recorder.scheme, regarding) + if err != nil { + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message) + return + } + refRelated, err := reference.GetReference(recorder.scheme, related) + if err != nil { + klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err) + } + if !util.ValidateEventType(eventtype) { + klog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action) + go func() { + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func (recorder *recorderImpl) makeEvent(refRegarding *v1.ObjectReference, refRelated *v1.ObjectReference, timestamp metav1.MicroTime, eventtype, reason, message string, reportingController string, reportingInstance string, action string) *eventsv1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := refRegarding.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &eventsv1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", refRegarding.Name, t.UnixNano()), + Namespace: namespace, + }, + EventTime: timestamp, + Series: nil, + ReportingController: reportingController, + ReportingInstance: reportingInstance, + Action: action, + Reason: reason, + Regarding: *refRegarding, + Related: refRelated, + Note: message, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/events/fake.go b/vendor/k8s.io/client-go/tools/events/fake.go new file mode 100644 index 0000000000..d572e0d3e1 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/fake.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string +} + +// Eventf emits an event +func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+note, args...) + } +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/client-go/tools/events/interfaces.go b/vendor/k8s.io/client-go/tools/events/interfaces.go new file mode 100644 index 0000000000..f1a523caa8 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/interfaces.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + eventsv1 "k8s.io/api/events/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Eventf constructs an event from the given information and puts it in the queue for sending. + // 'regarding' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'action' explains what happened with regarding/what action did the ReportingController + // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) + // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). + // 'note' is intended to be human readable. + Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + StartRecordingToSink(stopCh <-chan struct{}) + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder + + // StartEventWatcher enables you to watch for emitted events without usage + // of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests). + // NOTE: events received on your eventHandler should be copied before being used. + // TODO: figure out if this can be removed. + StartEventWatcher(eventHandler func(event runtime.Object)) func() + + // Shutdown shuts down the broadcaster + Shutdown() +} + +// EventSink knows how to store events (client-go implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// client-go's REST client. +type EventSink interface { + Create(event *eventsv1.Event) (*eventsv1.Event, error) + Update(event *eventsv1.Event) (*eventsv1.Event, error) + Patch(oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) +} + +// EventBroadcasterAdapter is a auxiliary interface to simplify migration to +// the new events API. It is a wrapper around new and legacy broadcasters +// that smartly chooses which one to use. 
+// +// Deprecated: This interface will be removed once migration is completed. +type EventBroadcasterAdapter interface { + // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + StartRecordingToSink(stopCh <-chan struct{}) + + // NewRecorder creates a new Event Recorder with specified name. + NewRecorder(name string) EventRecorder + + // DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name. + DeprecatedNewLegacyRecorder(name string) record.EventRecorder + + // Shutdown shuts down the broadcaster. + Shutdown() +} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go new file mode 100644 index 0000000000..a0192acb07 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go @@ -0,0 +1,130 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "k8s.io/client-go/util/workqueue" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. + +// Metrics subsystem and keys used by the workqueue. +const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +var ( + depth = k8smetrics.NewGaugeVec(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + }, []string{"name"}) + + adds = k8smetrics.NewCounterVec(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + }, []string{"name"}) + + latency = k8smetrics.NewHistogramVec(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested.", + Buckets: k8smetrics.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + workDuration = k8smetrics.NewHistogramVec(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: k8smetrics.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + unfinished = k8smetrics.NewGaugeVec(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + longestRunningProcessor = k8smetrics.NewGaugeVec(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + retries = k8smetrics.NewCounterVec(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + }, []string{"name"}) + + metrics = []k8smetrics.Registerable{ + depth, adds, latency, workDuration, unfinished, longestRunningProcessor, retries, + } +) + +type prometheusMetricsProvider struct { +} + +func init() { + for _, m := range metrics { + legacyregistry.MustRegister(m) + } + workqueue.SetProvider(prometheusMetricsProvider{}) +} + +func (prometheusMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return depth.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return adds.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return latency.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return workDuration.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return unfinished.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return longestRunningProcessor.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return retries.WithLabelValues(name) +} diff --git a/vendor/k8s.io/component-base/metrics/testutil/metrics.go b/vendor/k8s.io/component-base/metrics/testutil/metrics.go new file mode 100644 index 0000000000..3896550122 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/testutil/metrics.go @@ -0,0 +1,368 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "io" + "math" + "reflect" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "k8s.io/component-base/metrics" +) + +var ( + // MetricNameLabel is label under which model.Sample stores metric name + MetricNameLabel model.LabelName = model.MetricNameLabel + // QuantileLabel is label under which model.Sample stores latency quantile value + QuantileLabel model.LabelName = model.QuantileLabel +) + +// Metrics is generic metrics for other specific metrics +type Metrics map[string]model.Samples + +// Equal returns true if all metrics are the same as the arguments. 
+func (m *Metrics) Equal(o Metrics) bool { + var leftKeySet []string + var rightKeySet []string + for k := range *m { + leftKeySet = append(leftKeySet, k) + } + for k := range o { + rightKeySet = append(rightKeySet, k) + } + if !reflect.DeepEqual(leftKeySet, rightKeySet) { + return false + } + for _, k := range leftKeySet { + if !(*m)[k].Equal(o[k]) { + return false + } + } + return true +} + +// NewMetrics returns new metrics which are initialized. +func NewMetrics() Metrics { + result := make(Metrics) + return result +} + +// ParseMetrics parses Metrics from data returned from prometheus endpoint +func ParseMetrics(data string, output *Metrics) error { + dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText) + decoder := expfmt.SampleDecoder{ + Dec: dec, + Opts: &expfmt.DecodeOptions{}, + } + + for { + var v model.Vector + if err := decoder.Decode(&v); err != nil { + if err == io.EOF { + // Expected loop termination condition. + return nil + } + continue + } + for _, metric := range v { + name := string(metric.Metric[MetricNameLabel]) + (*output)[name] = append((*output)[name], metric) + } + } +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +func TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + var textParser expfmt.TextParser + return textParser.TextToMetricFamilies(in) +} + +// PrintSample returns formatted representation of metric Sample +func PrintSample(sample *model.Sample) string { + buf := make([]string, 0) + // Id is a VERY special label. For 'normal' container it's useless, but it's necessary + // for 'system' containers (e.g. /docker-daemon, /kubelet, etc.). We know if that's the + // case by checking if there's a label "kubernetes_container_name" present. It's hacky + // but it works... + _, normalContainer := sample.Metric["kubernetes_container_name"] + for k, v := range sample.Metric { + if strings.HasPrefix(string(k), "__") { + continue + } + + if string(k) == "id" && normalContainer { + continue + } + buf = append(buf, fmt.Sprintf("%v=%v", string(k), v)) + } + return fmt.Sprintf("[%v] = %v", strings.Join(buf, ","), sample.Value) +} + +// ComputeHistogramDelta computes the change in histogram metric for a selected label. 
+// Results are stored in after samples +func ComputeHistogramDelta(before, after model.Samples, label model.LabelName) { + beforeSamplesMap := make(map[string]*model.Sample) + for _, bSample := range before { + beforeSamplesMap[makeKey(bSample.Metric[label], bSample.Metric["le"])] = bSample + } + for _, aSample := range after { + if bSample, found := beforeSamplesMap[makeKey(aSample.Metric[label], aSample.Metric["le"])]; found { + aSample.Value = aSample.Value - bSample.Value + } + } +} + +func makeKey(a, b model.LabelValue) string { + return string(a) + "___" + string(b) +} + +// GetMetricValuesForLabel returns value of metric for a given dimension +func GetMetricValuesForLabel(ms Metrics, metricName, label string) map[string]int64 { + samples, found := ms[metricName] + result := make(map[string]int64, len(samples)) + if !found { + return result + } + for _, sample := range samples { + count := int64(sample.Value) + dimensionName := string(sample.Metric[model.LabelName(label)]) + result[dimensionName] = count + } + return result +} + +// ValidateMetrics verifies if every sample of metric has all expected labels +func ValidateMetrics(metrics Metrics, metricName string, expectedLabels ...string) error { + samples, ok := metrics[metricName] + if !ok { + return fmt.Errorf("metric %q was not found in metrics", metricName) + } + for _, sample := range samples { + for _, l := range expectedLabels { + if _, ok := sample.Metric[model.LabelName(l)]; !ok { + return fmt.Errorf("metric %q is missing label %q, sample: %q", metricName, l, sample.String()) + } + } + } + return nil +} + +// Histogram wraps prometheus histogram DTO (data transfer object) +type Histogram struct { + *dto.Histogram +} + +// GetHistogramFromGatherer collects a metric from a gatherer implementing k8s.io/component-base/metrics.Gatherer interface. +// Used only for testing purposes where we need to gather metrics directly from a running binary (without metrics endpoint). +func GetHistogramFromGatherer(gatherer metrics.Gatherer, metricName string) (Histogram, error) { + var metricFamily *dto.MetricFamily + m, err := gatherer.Gather() + if err != nil { + return Histogram{}, err + } + for _, mFamily := range m { + if mFamily.GetName() == metricName { + metricFamily = mFamily + break + } + } + + if metricFamily == nil { + return Histogram{}, fmt.Errorf("metric %q not found", metricName) + } + + if metricFamily.GetMetric() == nil { + return Histogram{}, fmt.Errorf("metric %q is empty", metricName) + } + + if len(metricFamily.GetMetric()) == 0 { + return Histogram{}, fmt.Errorf("metric %q is empty", metricName) + } + + return Histogram{ + // Histograms are stored under the first index (based on observation). + // Given there's only one histogram registered per each metric name, accessing + // the first index is sufficient. 
+ metricFamily.GetMetric()[0].GetHistogram(), + }, nil +} + +func uint64Ptr(u uint64) *uint64 { + return &u +} + +// Bucket of a histogram +type bucket struct { + upperBound float64 + count float64 +} + +func bucketQuantile(q float64, buckets []bucket) float64 { + if q < 0 { + return math.Inf(-1) + } + if q > 1 { + return math.Inf(+1) + } + + if len(buckets) < 2 { + return math.NaN() + } + + rank := q * buckets[len(buckets)-1].count + b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank }) + + if b == 0 { + return buckets[0].upperBound * (rank / buckets[0].count) + } + + if b == len(buckets)-1 && math.IsInf(buckets[b].upperBound, 1) { + return buckets[len(buckets)-2].upperBound + } + + // linear approximation of b-th bucket + brank := rank - buckets[b-1].count + bSize := buckets[b].upperBound - buckets[b-1].upperBound + bCount := buckets[b].count - buckets[b-1].count + + return buckets[b-1].upperBound + bSize*(brank/bCount) +} + +// Quantile computes q-th quantile of a cumulative histogram. +// It's expected the histogram is valid (by calling Validate) +func (hist *Histogram) Quantile(q float64) float64 { + var buckets []bucket + + for _, bckt := range hist.Bucket { + buckets = append(buckets, bucket{ + count: float64(bckt.GetCumulativeCount()), + upperBound: bckt.GetUpperBound(), + }) + } + + if len(buckets) == 0 || buckets[len(buckets)-1].upperBound != math.Inf(+1) { + // The list of buckets in dto.Histogram doesn't include the final +Inf bucket, so we + // add it here for the reset of the samples. + buckets = append(buckets, bucket{ + count: float64(hist.GetSampleCount()), + upperBound: math.Inf(+1), + }) + } + + return bucketQuantile(q, buckets) +} + +// Average computes histogram's average value +func (hist *Histogram) Average() float64 { + return hist.GetSampleSum() / float64(hist.GetSampleCount()) +} + +// Clear clears all fields of the wrapped histogram +func (hist *Histogram) Clear() { + if hist.SampleCount != nil { + *hist.SampleCount = 0 + } + if hist.SampleSum != nil { + *hist.SampleSum = 0 + } + for _, b := range hist.Bucket { + if b.CumulativeCount != nil { + *b.CumulativeCount = 0 + } + } +} + +// Validate makes sure the wrapped histogram has all necessary fields set and with valid values. 
+func (hist *Histogram) Validate() error { + if hist.SampleCount == nil || hist.GetSampleCount() == 0 { + return fmt.Errorf("nil or empty histogram SampleCount") + } + + if hist.SampleSum == nil || hist.GetSampleSum() == 0 { + return fmt.Errorf("nil or empty histogram SampleSum") + } + + for _, bckt := range hist.Bucket { + if bckt == nil { + return fmt.Errorf("empty histogram bucket") + } + if bckt.UpperBound == nil || bckt.GetUpperBound() < 0 { + return fmt.Errorf("nil or negative histogram bucket UpperBound") + } + } + + return nil +} + +// GetGaugeMetricValue extract metric value from GaugeMetric +func GetGaugeMetricValue(m metrics.GaugeMetric) (float64, error) { + metricProto := &dto.Metric{} + if err := m.Write(metricProto); err != nil { + return 0, fmt.Errorf("error writing m: %v", err) + } + return metricProto.Gauge.GetValue(), nil +} + +// GetCounterMetricValue extract metric value from CounterMetric +func GetCounterMetricValue(m metrics.CounterMetric) (float64, error) { + metricProto := &dto.Metric{} + if err := m.(metrics.Metric).Write(metricProto); err != nil { + return 0, fmt.Errorf("error writing m: %v", err) + } + return metricProto.Counter.GetValue(), nil +} + +// GetHistogramMetricValue extract sum of all samples from ObserverMetric +func GetHistogramMetricValue(m metrics.ObserverMetric) (float64, error) { + metricProto := &dto.Metric{} + if err := m.(metrics.Metric).Write(metricProto); err != nil { + return 0, fmt.Errorf("error writing m: %v", err) + } + return metricProto.Histogram.GetSampleSum(), nil +} + +// LabelsMatch returns true if metric has all expected labels otherwise false +func LabelsMatch(metric *dto.Metric, labelFilter map[string]string) bool { + metricLabels := map[string]string{} + + for _, labelPair := range metric.Label { + metricLabels[labelPair.GetName()] = labelPair.GetValue() + } + + // length comparison then match key to values in the maps + if len(labelFilter) > len(metricLabels) { + return false + } + + for labelName, labelValue := range labelFilter { + if value, ok := metricLabels[labelName]; !ok || value != labelValue { + return false + } + } + + return true +} diff --git a/vendor/k8s.io/component-base/metrics/testutil/promlint.go b/vendor/k8s.io/component-base/metrics/testutil/promlint.go new file mode 100644 index 0000000000..33b83f05c5 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/testutil/promlint.go @@ -0,0 +1,156 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "io" + "strings" + + "github.com/prometheus/client_golang/prometheus/testutil/promlint" +) + +// exceptionMetrics is an exception list of metrics which violates promlint rules. +// +// The original entries come from the existing metrics when we introduce promlint. +// We setup this list for allow and not fail on the current violations. +// Generally speaking, you need to fix the problem for a new metric rather than add it into the list. 
+var exceptionMetrics = []string{ + // k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/egressselector + "apiserver_egress_dialer_dial_failure_count", // counter metrics should have "_total" suffix + + // k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz + "apiserver_request_total", // label names should be written in 'snake_case' not 'camelCase' + + // k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters + "authenticated_user_requests", // counter metrics should have "_total" suffix + "authentication_attempts", // counter metrics should have "_total" suffix + + // kube-apiserver + "aggregator_openapi_v2_regeneration_count", + "apiserver_admission_step_admission_duration_seconds_summary", + "apiserver_current_inflight_requests", + "apiserver_longrunning_gauge", + "get_token_count", + "get_token_fail_count", + "ssh_tunnel_open_count", + "ssh_tunnel_open_fail_count", + + // kube-controller-manager + "attachdetach_controller_forced_detaches", + "authenticated_user_requests", + "authentication_attempts", + "get_token_count", + "get_token_fail_count", + "node_collector_evictions_number", + + // k8s.io/kubernetes/pkg/kubelet/server/stats + // The two metrics have been deprecated and will be removed in release v1.20+. + "container_cpu_usage_seconds_total", // non-counter metrics should not have "_total" suffix + "node_cpu_usage_seconds_total", // non-counter metrics should not have "_total" suffix +} + +// A Problem is an issue detected by a Linter. +type Problem promlint.Problem + +func (p *Problem) String() string { + return fmt.Sprintf("%s:%s", p.Metric, p.Text) +} + +// A Linter is a Prometheus metrics linter. It identifies issues with metric +// names, types, and metadata, and reports them to the caller. +type Linter struct { + promLinter *promlint.Linter +} + +// Lint performs a linting pass, returning a slice of Problems indicating any +// issues found in the metrics stream. The slice is sorted by metric name +// and issue description. +func (l *Linter) Lint() ([]Problem, error) { + promProblems, err := l.promLinter.Lint() + if err != nil { + return nil, err + } + + // Ignore problems those in exception list + problems := make([]Problem, 0, len(promProblems)) + for i := range promProblems { + if !l.shouldIgnore(promProblems[i].Metric) { + problems = append(problems, Problem(promProblems[i])) + } + } + + return problems, nil +} + +// shouldIgnore returns true if metric in the exception list, otherwise returns false. +func (l *Linter) shouldIgnore(metricName string) bool { + for i := range exceptionMetrics { + if metricName == exceptionMetrics[i] { + return true + } + } + + return false +} + +// NewPromLinter creates a new Linter that reads an input stream of Prometheus metrics. +// Only the text exposition format is supported. +func NewPromLinter(r io.Reader) *Linter { + return &Linter{ + promLinter: promlint.New(r), + } +} + +func mergeProblems(problems []Problem) string { + var problemsMsg []string + + for index := range problems { + problemsMsg = append(problemsMsg, problems[index].String()) + } + + return strings.Join(problemsMsg, ",") +} + +// shouldIgnore returns true if metric in the exception list, otherwise returns false. +func shouldIgnore(metricName string) bool { + for i := range exceptionMetrics { + if metricName == exceptionMetrics[i] { + return true + } + } + + return false +} + +// getLintError will ignore the metrics in exception list and converts lint problem to error. 
+func getLintError(problems []promlint.Problem) error { + var filteredProblems []Problem + for _, problem := range problems { + if shouldIgnore(problem.Metric) { + continue + } + + filteredProblems = append(filteredProblems, Problem(problem)) + } + + if len(filteredProblems) == 0 { + return nil + } + + return fmt.Errorf("lint error: %s", mergeProblems(filteredProblems)) +} diff --git a/vendor/k8s.io/component-base/metrics/testutil/testutil.go b/vendor/k8s.io/component-base/metrics/testutil/testutil.go new file mode 100644 index 0000000000..439045989c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/testutil/testutil.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "io" + + "github.com/prometheus/client_golang/prometheus/testutil" + + apimachineryversion "k8s.io/apimachinery/pkg/version" + "k8s.io/component-base/metrics" +) + +// CollectAndCompare registers the provided Collector with a newly created +// pedantic Registry. It then does the same as GatherAndCompare, gathering the +// metrics from the pedantic Registry. +func CollectAndCompare(c metrics.Collector, expected io.Reader, metricNames ...string) error { + lintProblems, err := testutil.CollectAndLint(c, metricNames...) + if err != nil { + return err + } + if err := getLintError(lintProblems); err != nil { + return err + } + + return testutil.CollectAndCompare(c, expected, metricNames...) +} + +// GatherAndCompare gathers all metrics from the provided Gatherer and compares +// it to an expected output read from the provided Reader in the Prometheus text +// exposition format. If any metricNames are provided, only metrics with those +// names are compared. +func GatherAndCompare(g metrics.Gatherer, expected io.Reader, metricNames ...string) error { + lintProblems, err := testutil.GatherAndLint(g, metricNames...) + if err != nil { + return err + } + if err := getLintError(lintProblems); err != nil { + return err + } + + return testutil.GatherAndCompare(g, expected, metricNames...) +} + +// CustomCollectAndCompare registers the provided StableCollector with a newly created +// registry. It then does the same as GatherAndCompare, gathering the +// metrics from the pedantic Registry. +func CustomCollectAndCompare(c metrics.StableCollector, expected io.Reader, metricNames ...string) error { + registry := metrics.NewKubeRegistry() + registry.CustomMustRegister(c) + + return GatherAndCompare(registry, expected, metricNames...) +} + +// NewFakeKubeRegistry creates a fake `KubeRegistry` that takes the input version as `build in version`. +// It should only be used in testing scenario especially for the deprecated metrics. +// The input version format should be `major.minor.patch`, e.g. '1.18.0'. 
+func NewFakeKubeRegistry(ver string) metrics.KubeRegistry { + backup := metrics.BuildVersion + defer func() { + metrics.BuildVersion = backup + }() + + metrics.BuildVersion = func() apimachineryversion.Info { + return apimachineryversion.Info{ + GitVersion: fmt.Sprintf("v%s-alpha+1.12345", ver), + } + } + + return metrics.NewKubeRegistry() +} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/doc.go b/vendor/k8s.io/kube-openapi/pkg/builder/doc.go new file mode 100644 index 0000000000..c3109067f2 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package builder contains code to generate OpenAPI discovery spec (which +// initial version of it also known as Swagger 2.0). +// For more details: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md +package builder diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go new file mode 100644 index 0000000000..d04080d57d --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go @@ -0,0 +1,445 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + restful "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/util" +) + +const ( + OpenAPIVersion = "2.0" +) + +type openAPI struct { + config *common.Config + swagger *spec.Swagger + protocolList []string + definitions map[string]common.OpenAPIDefinition +} + +// BuildOpenAPISpec builds OpenAPI spec given a list of webservices (containing routes) and common.Config to customize it. +func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec.Swagger, error) { + o := newOpenAPI(config) + err := o.buildPaths(webServices) + if err != nil { + return nil, err + } + return o.finalizeSwagger() +} + +// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it. +func BuildOpenAPIDefinitionsForResource(model interface{}, config *common.Config) (*spec.Definitions, error) { + o := newOpenAPI(config) + // We can discard the return value of toSchema because all we care about is the side effect of calling it. 
+ // All the models created for this resource get added to o.swagger.Definitions + _, err := o.toSchema(util.GetCanonicalTypeName(model)) + if err != nil { + return nil, err + } + swagger, err := o.finalizeSwagger() + if err != nil { + return nil, err + } + return &swagger.Definitions, nil +} + +// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the +// passed type names. +func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (*spec.Swagger, error) { + o := newOpenAPI(config) + // We can discard the return value of toSchema because all we care about is the side effect of calling it. + // All the models created for this resource get added to o.swagger.Definitions + for _, name := range names { + _, err := o.toSchema(name) + if err != nil { + return nil, err + } + } + return o.finalizeSwagger() +} + +// newOpenAPI sets up the openAPI object so we can build the spec. +func newOpenAPI(config *common.Config) openAPI { + o := openAPI{ + config: config, + swagger: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Swagger: OpenAPIVersion, + Definitions: spec.Definitions{}, + Responses: config.ResponseDefinitions, + Paths: &spec.Paths{Paths: map[string]spec.PathItem{}}, + Info: config.Info, + }, + }, + } + if o.config.GetOperationIDAndTags == nil { + o.config.GetOperationIDAndTags = func(r *restful.Route) (string, []string, error) { + return r.Operation, nil, nil + } + } + if o.config.GetDefinitionName == nil { + o.config.GetDefinitionName = func(name string) (string, spec.Extensions) { + return name[strings.LastIndex(name, "/")+1:], nil + } + } + o.definitions = o.config.GetDefinitions(func(name string) spec.Ref { + defName, _ := o.config.GetDefinitionName(name) + return spec.MustCreateRef("#/definitions/" + common.EscapeJsonPointer(defName)) + }) + if o.config.CommonResponses == nil { + o.config.CommonResponses = map[int]spec.Response{} + } + return o +} + +// finalizeSwagger is called after the spec is built and returns the final spec. +// NOTE: finalizeSwagger also make changes to the final spec, as specified in the config. +func (o *openAPI) finalizeSwagger() (*spec.Swagger, error) { + if o.config.SecurityDefinitions != nil { + o.swagger.SecurityDefinitions = *o.config.SecurityDefinitions + o.swagger.Security = o.config.DefaultSecurity + } + if o.config.PostProcessSpec != nil { + var err error + o.swagger, err = o.config.PostProcessSpec(o.swagger) + if err != nil { + return nil, err + } + } + + return o.swagger, nil +} + +func (o *openAPI) buildDefinitionRecursively(name string) error { + uniqueName, extensions := o.config.GetDefinitionName(name) + if _, ok := o.swagger.Definitions[uniqueName]; ok { + return nil + } + if item, ok := o.definitions[name]; ok { + schema := spec.Schema{ + VendorExtensible: item.Schema.VendorExtensible, + SchemaProps: item.Schema.SchemaProps, + SwaggerSchemaProps: item.Schema.SwaggerSchemaProps, + } + if extensions != nil { + if schema.Extensions == nil { + schema.Extensions = spec.Extensions{} + } + for k, v := range extensions { + schema.Extensions[k] = v + } + } + if v, ok := item.Schema.Extensions[common.ExtensionV2Schema]; ok { + if v2Schema, isOpenAPISchema := v.(spec.Schema); isOpenAPISchema { + schema = v2Schema + } + } + o.swagger.Definitions[uniqueName] = schema + for _, v := range item.Dependencies { + if err := o.buildDefinitionRecursively(v); err != nil { + return err + } + } + } else { + return fmt.Errorf("cannot find model definition for %v. 
If you added a new type, you may need to add +k8s:openapi-gen=true to the package or type and run code-gen again", name) + } + return nil +} + +// buildDefinitionForType build a definition for a given type and return a referable name to its definition. +// This is the main function that keep track of definitions used in this spec and is depend on code generated +// by k8s.io/kubernetes/cmd/libs/go2idl/openapi-gen. +func (o *openAPI) buildDefinitionForType(name string) (string, error) { + if err := o.buildDefinitionRecursively(name); err != nil { + return "", err + } + defName, _ := o.config.GetDefinitionName(name) + return "#/definitions/" + common.EscapeJsonPointer(defName), nil +} + +// buildPaths builds OpenAPI paths using go-restful's web services. +func (o *openAPI) buildPaths(webServices []*restful.WebService) error { + pathsToIgnore := util.NewTrie(o.config.IgnorePrefixes) + duplicateOpId := make(map[string]string) + for _, w := range webServices { + rootPath := w.RootPath() + if pathsToIgnore.HasPrefix(rootPath) { + continue + } + commonParams, err := o.buildParameters(w.PathParameters()) + if err != nil { + return err + } + for path, routes := range groupRoutesByPath(w.Routes()) { + // go-swagger has special variable definition {$NAME:*} that can only be + // used at the end of the path and it is not recognized by OpenAPI. + if strings.HasSuffix(path, ":*}") { + path = path[:len(path)-3] + "}" + } + if pathsToIgnore.HasPrefix(path) { + continue + } + // Aggregating common parameters make API spec (and generated clients) simpler + inPathCommonParamsMap, err := o.findCommonParameters(routes) + if err != nil { + return err + } + pathItem, exists := o.swagger.Paths.Paths[path] + if exists { + return fmt.Errorf("duplicate webservice route has been found for path: %v", path) + } + pathItem = spec.PathItem{ + PathItemProps: spec.PathItemProps{ + Parameters: make([]spec.Parameter, 0), + }, + } + // add web services's parameters as well as any parameters appears in all ops, as common parameters + pathItem.Parameters = append(pathItem.Parameters, commonParams...) 
+ for _, p := range inPathCommonParamsMap { + pathItem.Parameters = append(pathItem.Parameters, p) + } + sortParameters(pathItem.Parameters) + for _, route := range routes { + op, err := o.buildOperations(route, inPathCommonParamsMap) + sortParameters(op.Parameters) + if err != nil { + return err + } + dpath, exists := duplicateOpId[op.ID] + if exists { + return fmt.Errorf("duplicate Operation ID %v for path %v and %v", op.ID, dpath, path) + } else { + duplicateOpId[op.ID] = path + } + switch strings.ToUpper(route.Method) { + case "GET": + pathItem.Get = op + case "POST": + pathItem.Post = op + case "HEAD": + pathItem.Head = op + case "PUT": + pathItem.Put = op + case "DELETE": + pathItem.Delete = op + case "OPTIONS": + pathItem.Options = op + case "PATCH": + pathItem.Patch = op + } + } + o.swagger.Paths.Paths[path] = pathItem + } + } + return nil +} + +// buildOperations builds operations for each webservice path +func (o *openAPI) buildOperations(route restful.Route, inPathCommonParamsMap map[interface{}]spec.Parameter) (ret *spec.Operation, err error) { + ret = &spec.Operation{ + OperationProps: spec.OperationProps{ + Description: route.Doc, + Consumes: route.Consumes, + Produces: route.Produces, + Schemes: o.config.ProtocolList, + Responses: &spec.Responses{ + ResponsesProps: spec.ResponsesProps{ + StatusCodeResponses: make(map[int]spec.Response), + }, + }, + }, + } + for k, v := range route.Metadata { + if strings.HasPrefix(k, common.ExtensionPrefix) { + if ret.Extensions == nil { + ret.Extensions = spec.Extensions{} + } + ret.Extensions.Add(k, v) + } + } + if ret.ID, ret.Tags, err = o.config.GetOperationIDAndTags(&route); err != nil { + return ret, err + } + + // Build responses + for _, resp := range route.ResponseErrors { + ret.Responses.StatusCodeResponses[resp.Code], err = o.buildResponse(resp.Model, resp.Message) + if err != nil { + return ret, err + } + } + // If there is no response but a write sample, assume that write sample is an http.StatusOK response. + if len(ret.Responses.StatusCodeResponses) == 0 && route.WriteSample != nil { + ret.Responses.StatusCodeResponses[http.StatusOK], err = o.buildResponse(route.WriteSample, "OK") + if err != nil { + return ret, err + } + } + for code, resp := range o.config.CommonResponses { + if _, exists := ret.Responses.StatusCodeResponses[code]; !exists { + ret.Responses.StatusCodeResponses[code] = resp + } + } + // If there is still no response, use default response provided. 
+ if len(ret.Responses.StatusCodeResponses) == 0 { + ret.Responses.Default = o.config.DefaultResponse + } + + // Build non-common Parameters + ret.Parameters = make([]spec.Parameter, 0) + for _, param := range route.ParameterDocs { + if _, isCommon := inPathCommonParamsMap[mapKeyFromParam(param)]; !isCommon { + openAPIParam, err := o.buildParameter(param.Data(), route.ReadSample) + if err != nil { + return ret, err + } + ret.Parameters = append(ret.Parameters, openAPIParam) + } + } + return ret, nil +} + +func (o *openAPI) buildResponse(model interface{}, description string) (spec.Response, error) { + schema, err := o.toSchema(util.GetCanonicalTypeName(model)) + if err != nil { + return spec.Response{}, err + } + return spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: description, + Schema: schema, + }, + }, nil +} + +func (o *openAPI) findCommonParameters(routes []restful.Route) (map[interface{}]spec.Parameter, error) { + commonParamsMap := make(map[interface{}]spec.Parameter, 0) + paramOpsCountByName := make(map[interface{}]int, 0) + paramNameKindToDataMap := make(map[interface{}]restful.ParameterData, 0) + for _, route := range routes { + routeParamDuplicateMap := make(map[interface{}]bool) + s := "" + for _, param := range route.ParameterDocs { + m, _ := json.Marshal(param.Data()) + s += string(m) + "\n" + key := mapKeyFromParam(param) + if routeParamDuplicateMap[key] { + msg, _ := json.Marshal(route.ParameterDocs) + return commonParamsMap, fmt.Errorf("duplicate parameter %v for route %v, %v", param.Data().Name, string(msg), s) + } + routeParamDuplicateMap[key] = true + paramOpsCountByName[key]++ + paramNameKindToDataMap[key] = param.Data() + } + } + for key, count := range paramOpsCountByName { + paramData := paramNameKindToDataMap[key] + if count == len(routes) && paramData.Kind != restful.BodyParameterKind { + openAPIParam, err := o.buildParameter(paramData, nil) + if err != nil { + return commonParamsMap, err + } + commonParamsMap[key] = openAPIParam + } + } + return commonParamsMap, nil +} + +func (o *openAPI) toSchema(name string) (_ *spec.Schema, err error) { + if openAPIType, openAPIFormat := common.OpenAPITypeFormat(name); openAPIType != "" { + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{openAPIType}, + Format: openAPIFormat, + }, + }, nil + } else { + ref, err := o.buildDefinitionForType(name) + if err != nil { + return nil, err + } + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: spec.MustCreateRef(ref), + }, + }, nil + } +} + +func (o *openAPI) buildParameter(restParam restful.ParameterData, bodySample interface{}) (ret spec.Parameter, err error) { + ret = spec.Parameter{ + ParamProps: spec.ParamProps{ + Name: restParam.Name, + Description: restParam.Description, + Required: restParam.Required, + }, + } + switch restParam.Kind { + case restful.BodyParameterKind: + if bodySample != nil { + ret.In = "body" + ret.Schema, err = o.toSchema(util.GetCanonicalTypeName(bodySample)) + return ret, err + } else { + // There is not enough information in the body parameter to build the definition. + // Body parameter has a data type that is a short name but we need full package name + // of the type to create a definition. 
+ return ret, fmt.Errorf("restful body parameters are not supported: %v", restParam.DataType) + } + case restful.PathParameterKind: + ret.In = "path" + if !restParam.Required { + return ret, fmt.Errorf("path parameters should be marked at required for parameter %v", restParam) + } + case restful.QueryParameterKind: + ret.In = "query" + case restful.HeaderParameterKind: + ret.In = "header" + case restful.FormParameterKind: + ret.In = "formData" + default: + return ret, fmt.Errorf("unknown restful operation kind : %v", restParam.Kind) + } + openAPIType, openAPIFormat := common.OpenAPITypeFormat(restParam.DataType) + if openAPIType == "" { + return ret, fmt.Errorf("non-body Restful parameter type should be a simple type, but got : %v", restParam.DataType) + } + ret.Type = openAPIType + ret.Format = openAPIFormat + ret.UniqueItems = !restParam.AllowMultiple + return ret, nil +} + +func (o *openAPI) buildParameters(restParam []*restful.Parameter) (ret []spec.Parameter, err error) { + ret = make([]spec.Parameter, len(restParam)) + for i, v := range restParam { + ret[i], err = o.buildParameter(v.Data(), nil) + if err != nil { + return ret, err + } + } + return ret, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/util.go b/vendor/k8s.io/kube-openapi/pkg/builder/util.go new file mode 100644 index 0000000000..5e9a56a6b8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder/util.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "sort" + + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" +) + +type parameters []spec.Parameter + +func (s parameters) Len() int { return len(s) } +func (s parameters) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// byNameIn used in sorting parameters by Name and In fields. +type byNameIn struct { + parameters +} + +func (s byNameIn) Less(i, j int) bool { + return s.parameters[i].Name < s.parameters[j].Name || (s.parameters[i].Name == s.parameters[j].Name && s.parameters[i].In < s.parameters[j].In) +} + +// SortParameters sorts parameters by Name and In fields. +func sortParameters(p []spec.Parameter) { + sort.Sort(byNameIn{p}) +} + +func groupRoutesByPath(routes []restful.Route) map[string][]restful.Route { + pathToRoutes := make(map[string][]restful.Route) + for _, r := range routes { + pathToRoutes[r.Path] = append(pathToRoutes[r.Path], r) + } + return pathToRoutes +} + +func mapKeyFromParam(param *restful.Parameter) interface{} { + return struct { + Name string + Kind int + }{ + Name: param.Data().Name, + Kind: param.Data().Kind, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go b/vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go new file mode 100644 index 0000000000..69646871cd --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go @@ -0,0 +1,208 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import "github.com/go-openapi/spec" + +// PruneDefaults remove all the defaults recursively from all the +// schemas in the definitions, and does not modify the definitions in +// place. +func PruneDefaults(definitions spec.Definitions) spec.Definitions { + definitionsCloned := false + for k, v := range definitions { + if s := PruneDefaultsSchema(&v); s != &v { + if !definitionsCloned { + definitionsCloned = true + orig := definitions + definitions = make(spec.Definitions, len(orig)) + for k2, v2 := range orig { + definitions[k2] = v2 + } + } + definitions[k] = *s + } + } + return definitions +} + +// PruneDefaultsSchema remove all the defaults recursively from the +// schema in place. +func PruneDefaultsSchema(schema *spec.Schema) *spec.Schema { + if schema == nil { + return nil + } + + orig := schema + clone := func() { + if orig == schema { + schema = &spec.Schema{} + *schema = *orig + } + } + + if schema.Default != nil { + clone() + schema.Default = nil + } + + definitionsCloned := false + for k, v := range schema.Definitions { + if s := PruneDefaultsSchema(&v); s != &v { + if !definitionsCloned { + definitionsCloned = true + clone() + schema.Definitions = make(spec.Definitions, len(orig.Definitions)) + for k2, v2 := range orig.Definitions { + schema.Definitions[k2] = v2 + } + } + schema.Definitions[k] = *s + } + } + + propertiesCloned := false + for k, v := range schema.Properties { + if s := PruneDefaultsSchema(&v); s != &v { + if !propertiesCloned { + propertiesCloned = true + clone() + schema.Properties = make(map[string]spec.Schema, len(orig.Properties)) + for k2, v2 := range orig.Properties { + schema.Properties[k2] = v2 + } + } + schema.Properties[k] = *s + } + } + + patternPropertiesCloned := false + for k, v := range schema.PatternProperties { + if s := PruneDefaultsSchema(&v); s != &v { + if !patternPropertiesCloned { + patternPropertiesCloned = true + clone() + schema.PatternProperties = make(map[string]spec.Schema, len(orig.PatternProperties)) + for k2, v2 := range orig.PatternProperties { + schema.PatternProperties[k2] = v2 + } + } + schema.PatternProperties[k] = *s + } + } + + dependenciesCloned := false + for k, v := range schema.Dependencies { + if s := PruneDefaultsSchema(v.Schema); s != v.Schema { + if !dependenciesCloned { + dependenciesCloned = true + clone() + schema.Dependencies = make(spec.Dependencies, len(orig.Dependencies)) + for k2, v2 := range orig.Dependencies { + schema.Dependencies[k2] = v2 + } + } + v.Schema = s + schema.Dependencies[k] = v + } + } + + allOfCloned := false + for i := range schema.AllOf { + if s := PruneDefaultsSchema(&schema.AllOf[i]); s != &schema.AllOf[i] { + if !allOfCloned { + allOfCloned = true + clone() + schema.AllOf = make([]spec.Schema, len(orig.AllOf)) + copy(schema.AllOf, orig.AllOf) + } + schema.AllOf[i] = *s + } + } + + anyOfCloned := false + for i := range schema.AnyOf { + if s := PruneDefaultsSchema(&schema.AnyOf[i]); s != &schema.AnyOf[i] { + if !anyOfCloned { + 
anyOfCloned = true + clone() + schema.AnyOf = make([]spec.Schema, len(orig.AnyOf)) + copy(schema.AnyOf, orig.AnyOf) + } + schema.AnyOf[i] = *s + } + } + + oneOfCloned := false + for i := range schema.OneOf { + if s := PruneDefaultsSchema(&schema.OneOf[i]); s != &schema.OneOf[i] { + if !oneOfCloned { + oneOfCloned = true + clone() + schema.OneOf = make([]spec.Schema, len(orig.OneOf)) + copy(schema.OneOf, orig.OneOf) + } + schema.OneOf[i] = *s + } + } + + if schema.Not != nil { + if s := PruneDefaultsSchema(schema.Not); s != schema.Not { + clone() + schema.Not = s + } + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + if s := PruneDefaultsSchema(schema.AdditionalProperties.Schema); s != schema.AdditionalProperties.Schema { + clone() + schema.AdditionalProperties = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalProperties.Allows} + } + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + if s := PruneDefaultsSchema(schema.AdditionalItems.Schema); s != schema.AdditionalItems.Schema { + clone() + schema.AdditionalItems = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalItems.Allows} + } + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + if s := PruneDefaultsSchema(schema.Items.Schema); s != schema.Items.Schema { + clone() + schema.Items = &spec.SchemaOrArray{Schema: s} + } + } else { + itemsCloned := false + for i := range schema.Items.Schemas { + if s := PruneDefaultsSchema(&schema.Items.Schemas[i]); s != &schema.Items.Schemas[i] { + if !itemsCloned { + clone() + schema.Items = &spec.SchemaOrArray{ + Schemas: make([]spec.Schema, len(orig.Items.Schemas)), + } + itemsCloned = true + copy(schema.Items.Schemas, orig.Items.Schemas) + } + schema.Items.Schemas[i] = *s + } + } + } + } + + return schema +} diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go new file mode 100644 index 0000000000..7cd1bbd0ff --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -0,0 +1,267 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "bytes" + "compress/gzip" + "crypto/sha512" + "fmt" + "mime" + "net/http" + "sync" + "time" + + "github.com/NYTimes/gziphandler" + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + "github.com/golang/protobuf/proto" + "github.com/googleapis/gnostic/compiler" + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + jsoniter "github.com/json-iterator/go" + "github.com/munnerz/goautoneg" + "gopkg.in/yaml.v2" + + "k8s.io/kube-openapi/pkg/builder" + "k8s.io/kube-openapi/pkg/common" +) + +const ( + jsonExt = ".json" + + mimeJson = "application/json" + // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. + mimePb = "application/com.github.googleapis.gnostic.OpenAPIv2@68f4ded+protobuf" + mimePbGz = "application/x-gzip" +) + +// OpenAPIService is the service responsible for serving OpenAPI spec. 
It has +// the ability to safely change the spec while serving it. +type OpenAPIService struct { + // rwMutex protects All members of this service. + rwMutex sync.RWMutex + + lastModified time.Time + + specBytes []byte + specPb []byte + specPbGz []byte + + specBytesETag string + specPbETag string + specPbGzETag string +} + +func init() { + mime.AddExtensionType(".json", mimeJson) + mime.AddExtensionType(".pb-v1", mimePb) + mime.AddExtensionType(".gz", mimePbGz) +} + +func computeETag(data []byte) string { + return fmt.Sprintf("\"%X\"", sha512.Sum512(data)) +} + +// NewOpenAPIService builds an OpenAPIService starting with the given spec. +func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { + o := &OpenAPIService{} + if err := o.UpdateSpec(spec); err != nil { + return nil, err + } + return o, nil +} + +func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + return o.specBytes, o.specBytesETag, o.lastModified +} + +func (o *OpenAPIService) getSwaggerPbBytes() ([]byte, string, time.Time) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + return o.specPb, o.specPbETag, o.lastModified +} + +func (o *OpenAPIService) getSwaggerPbGzBytes() ([]byte, string, time.Time) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + return o.specPbGz, o.specPbGzETag, o.lastModified +} + +func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) { + specBytes, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(openapiSpec) + if err != nil { + return err + } + var json map[string]interface{} + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(specBytes, &json); err != nil { + return err + } + specPb, err := ToProtoBinary(json) + if err != nil { + return err + } + specPbGz := toGzip(specPb) + + specBytesETag := computeETag(specBytes) + specPbETag := computeETag(specPb) + specPbGzETag := computeETag(specPbGz) + + lastModified := time.Now() + + o.rwMutex.Lock() + defer o.rwMutex.Unlock() + + o.specBytes = specBytes + o.specPb = specPb + o.specPbGz = specPbGz + o.specBytesETag = specBytesETag + o.specPbETag = specPbETag + o.specPbGzETag = specPbGzETag + o.lastModified = lastModified + + return nil +} + +func jsonToYAML(j map[string]interface{}) yaml.MapSlice { + if j == nil { + return nil + } + ret := make(yaml.MapSlice, 0, len(j)) + for k, v := range j { + ret = append(ret, yaml.MapItem{k, jsonToYAMLValue(v)}) + } + return ret +} + +func jsonToYAMLValue(j interface{}) interface{} { + switch j := j.(type) { + case map[string]interface{}: + return jsonToYAML(j) + case []interface{}: + ret := make([]interface{}, len(j)) + for i := range j { + ret[i] = jsonToYAMLValue(j[i]) + } + return ret + case float64: + // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 + if i64 := int64(j); j == float64(i64) { + if i := int(i64); i64 == int64(i) { + return i + } + return i64 + } + if ui64 := uint64(j); j == float64(ui64) { + return ui64 + } + return j + case int64: + if i := int(j); j == int64(i) { + return i + } + return j + } + return j +} + +func ToProtoBinary(json map[string]interface{}) ([]byte, error) { + document, err := openapi_v2.NewDocument(jsonToYAML(json), compiler.NewContext("$root", nil)) + if err != nil { + return nil, err + } + return proto.Marshal(document) +} + +func toGzip(data []byte) []byte { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + zw.Write(data) + zw.Close() + return buf.Bytes() +} + +// 
RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. +// +// Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead. +func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) { + o, err := NewOpenAPIService(spec) + if err != nil { + return nil, err + } + return o, o.RegisterOpenAPIVersionedService(servePath, handler) +} + +// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. +func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error { + accepted := []struct { + Type string + SubType string + GetDataAndETag func() ([]byte, string, time.Time) + }{ + {"application", "json", o.getSwaggerBytes}, + {"application", "com.github.proto-openapi.spec.v2@v1.0+protobuf", o.getSwaggerPbBytes}, + } + + handler.Handle(servePath, gziphandler.GzipHandler(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + decipherableFormats := r.Header.Get("Accept") + if decipherableFormats == "" { + decipherableFormats = "*/*" + } + clauses := goautoneg.ParseAccept(decipherableFormats) + w.Header().Add("Vary", "Accept") + for _, clause := range clauses { + for _, accepts := range accepted { + if clause.Type != accepts.Type && clause.Type != "*" { + continue + } + if clause.SubType != accepts.SubType && clause.SubType != "*" { + continue + } + + // serve the first matching media type in the sorted clause list + data, etag, lastModified := accepts.GetDataAndETag() + w.Header().Set("Etag", etag) + // ServeContent will take care of caching using eTag. + http.ServeContent(w, r, servePath, lastModified, bytes.NewReader(data)) + return + } + } + // Return 406 for not acceptable format + w.WriteHeader(406) + return + }), + )) + + return nil +} + +// BuildAndRegisterOpenAPIVersionedService builds the spec and registers a handler to provide access to it. +// Use this method if your OpenAPI spec is static. If you want to update the spec, use BuildOpenAPISpec then RegisterOpenAPIVersionedService. +func BuildAndRegisterOpenAPIVersionedService(servePath string, webServices []*restful.WebService, config *common.Config, handler common.PathHandler) (*OpenAPIService, error) { + spec, err := builder.BuildOpenAPISpec(webServices, config) + if err != nil { + return nil, err + } + o, err := NewOpenAPIService(spec) + if err != nil { + return nil, err + } + return o, o.RegisterOpenAPIVersionedService(servePath, handler) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go new file mode 100644 index 0000000000..adc76f6c0c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -0,0 +1,452 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schemaconv + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. +func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + input: models, + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + if err := c.convertAll(); err != nil { + return nil, err + } + c.addCommonTypes() + return c.output, nil +} + +type convert struct { + input proto.Models + preserveUnknownFields bool + output *schema.Schema + + currentName string + current *schema.Atom + errorMessages []string +} + +func (c *convert) push(name string, a *schema.Atom) *convert { + return &convert{ + input: c.input, + preserveUnknownFields: c.preserveUnknownFields, + output: c.output, + currentName: name, + current: a, + } +} + +func (c *convert) top() *schema.Atom { return c.current } + +func (c *convert) pop(c2 *convert) { + c.errorMessages = append(c.errorMessages, c2.errorMessages...) +} + +func (c *convert) convertAll() error { + for _, name := range c.input.ListModels() { + model := c.input.LookupModel(name) + c.insertTypeDef(name, model) + } + if len(c.errorMessages) > 0 { + return errors.New(strings.Join(c.errorMessages, "\n")) + } + return nil +} + +func (c *convert) reportError(format string, args ...interface{}) { + c.errorMessages = append(c.errorMessages, + c.currentName+": "+fmt.Sprintf(format, args...), + ) +} + +func (c *convert) insertTypeDef(name string, model proto.Schema) { + def := schema.TypeDef{ + Name: name, + } + c2 := c.push(name, &def.Atom) + model.Accept(c2) + c.pop(c2) + if def.Atom == (schema.Atom{}) { + // This could happen if there were a top-level reference. 
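+ // (Editor's note, illustrative and not part of the upstream file: a model that is
+ // nothing but a $ref is handled by VisitReference, which deliberately leaves the Atom
+ // empty, so such a wrapper is skipped here rather than emitted as an empty TypeDef.)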
+ return + } + c.output.Types = append(c.output.Types, def) +} + +func (c *convert) addCommonTypes() { + c.output.Types = append(c.output.Types, untypedDef) + c.output.Types = append(c.output.Types, deducedDef) +} + +var untypedName string = "__untyped_atomic_" + +var untypedDef schema.TypeDef = schema.TypeDef{ + Name: untypedName, + Atom: schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: &schema.List{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + Map: &schema.Map{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + }, +} + +var deducedName string = "__untyped_deduced_" + +var deducedDef schema.TypeDef = schema.TypeDef{ + Name: deducedName, + Atom: schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: &schema.List{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + Map: &schema.Map{ + ElementType: schema.TypeRef{ + NamedType: &deducedName, + }, + ElementRelationship: schema.Separable, + }, + }, +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? + tr.NamedType = &untypedName + } + } + return tr +} + +func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { + schemaUnions := []schema.Union{} + if iunions, ok := extensions["x-kubernetes-unions"]; ok { + unions, ok := iunions.([]interface{}) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" should be a list, got %#v`, unions) + } + for _, iunion := range unions { + union, ok := iunion.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" items should be a map of string to unions, got %#v`, iunion) + } + unionMap := map[string]interface{}{} + for k, v := range union { + key, ok := k.(string) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" has non-string key: %#v`, k) + } + unionMap[key] = v + } + schemaUnion, err := makeUnion(unionMap) + if err != nil { + return nil, err + } + schemaUnions = append(schemaUnions, schemaUnion) + } + } + + // Make sure we have no overlap between unions + fs := map[string]struct{}{} + for _, u := range schemaUnions { + if u.Discriminator != nil { + if _, ok := fs[*u.Discriminator]; ok { + return nil, fmt.Errorf("%v field appears multiple times in unions", *u.Discriminator) + } + fs[*u.Discriminator] = struct{}{} + } + for _, f := range u.Fields { + if _, ok := fs[f.FieldName]; ok { + return nil, fmt.Errorf("%v field appears multiple times in unions", f.FieldName) + } + fs[f.FieldName] = struct{}{} + } + } + + return schemaUnions, nil +} + +func makeUnion(extensions map[string]interface{}) (schema.Union, error) { + union := schema.Union{ + Fields: []schema.UnionField{}, + } + + if idiscriminator, ok := extensions["discriminator"]; ok { + discriminator, ok := idiscriminator.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"discriminator" must be a string, got: %#v`, idiscriminator) + } + 
union.Discriminator = &discriminator + } + + if ifields, ok := extensions["fields-to-discriminateBy"]; ok { + fields, ok := ifields.(map[interface{}]interface{}) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy" must be a map[string]string, got: %#v`, ifields) + } + // Needs sorted keys by field. + keys := []string{} + for ifield := range fields { + field, ok := ifield.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy": field must be a string, got: %#v`, ifield) + } + keys = append(keys, field) + + } + sort.Strings(keys) + reverseMap := map[string]struct{}{} + for _, field := range keys { + value := fields[field] + discriminated, ok := value.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy"/%v: value must be a string, got: %#v`, field, value) + } + union.Fields = append(union.Fields, schema.UnionField{ + FieldName: field, + DiscriminatorValue: discriminated, + }) + + // Check that we don't have the same discriminateBy multiple times. + if _, ok := reverseMap[discriminated]; ok { + return schema.Union{}, fmt.Errorf("Multiple fields have the same discriminated name: %v", discriminated) + } + reverseMap[discriminated] = struct{}{} + } + } + + if union.Discriminator != nil && len(union.Fields) == 0 { + return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) + } + return union, nil +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. 
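+ // Editor's sketch of the extension shape consumed by makeUnions/makeUnion above
+ // (illustrative only, not part of the upstream file; the field and discriminator
+ // names are made up):
+ //
+ //   x-kubernetes-unions:
+ //   - discriminator: type
+ //     fields-to-discriminateBy:
+ //       tcpSocket: TCPSocket
+ //       httpGet: HTTPGet
+ //
+ // i.e. a list of unions, each with an optional string "discriminator" and a map from
+ // field name to its discriminated name; a field appearing in more than one union is
+ // rejected by the overlap check above.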
+ a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + ext := k.GetExtensions() + if val, ok := ext["x-kubernetes-map-type"]; ok { + switch val { + case "atomic": + a.Map.ElementRelationship = schema.Atomic + case "granular": + a.Map.ElementRelationship = schema.Separable + default: + c.reportError("unknown map type %v", val) + } + } +} + +func toStringSlice(o interface{}) (out []string, ok bool) { + switch t := o.(type) { + case []interface{}: + for _, v := range t { + switch vt := v.(type) { + case string: + out = append(out, vt) + } + } + return out, true + } + return nil, false +} + +func (c *convert) VisitArray(a *proto.Array) { + atom := c.top() + atom.List = &schema.List{ + ElementRelationship: schema.Atomic, + } + l := atom.List + l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) + + ext := a.GetExtensions() + + if val, ok := ext["x-kubernetes-list-type"]; ok { + if val == "atomic" { + l.ElementRelationship = schema.Atomic + } else if val == "set" { + l.ElementRelationship = schema.Associative + } else if val == "map" { + l.ElementRelationship = schema.Associative + if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { + if keyNames, ok := toStringSlice(keys); ok { + l.Keys = keyNames + } else { + c.reportError("uninterpreted map keys: %#v", keys) + } + } else { + c.reportError("missing map keys") + } + } else { + c.reportError("unknown list type %v", val) + l.ElementRelationship = schema.Atomic + } + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + if val == "merge" || val == "merge,retainKeys" { + l.ElementRelationship = schema.Associative + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + if keyName, ok := key.(string); ok { + l.Keys = []string{keyName} + } else { + c.reportError("uninterpreted merge key: %#v", key) + } + } else { + // It's not an error for this to be absent, it + // means it's a set. + } + } else if val == "retainKeys" { + } else { + c.reportError("unknown patch strategy %v", val) + l.ElementRelationship = schema.Atomic + } + } +} + +func (c *convert) VisitMap(m *proto.Map) { + a := c.top() + a.Map = &schema.Map{} + a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + + ext := m.GetExtensions() + if val, ok := ext["x-kubernetes-map-type"]; ok { + switch val { + case "atomic": + a.Map.ElementRelationship = schema.Atomic + case "granular": + a.Map.ElementRelationship = schema.Separable + default: + c.reportError("unknown map type %v", val) + } + } +} + +func ptr(s schema.Scalar) *schema.Scalar { return &s } + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + switch p.Type { + case proto.Integer: + a.Scalar = ptr(schema.Numeric) + case proto.Number: + a.Scalar = ptr(schema.Numeric) + case proto.String: + switch p.Format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. 
+ a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) + } + case proto.Boolean: + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = untypedDef.Atom + if c.preserveUnknownFields { + *c.top() = deducedDef.Atom + } +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/trie.go b/vendor/k8s.io/kube-openapi/pkg/util/trie.go new file mode 100644 index 0000000000..a9a76c1791 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/trie.go @@ -0,0 +1,79 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +// A simple trie implementation with Add and HasPrefix methods only. +type Trie struct { + children map[byte]*Trie + wordTail bool + word string +} + +// NewTrie creates a Trie and add all strings in the provided list to it. +func NewTrie(list []string) Trie { + ret := Trie{ + children: make(map[byte]*Trie), + wordTail: false, + } + for _, v := range list { + ret.Add(v) + } + return ret +} + +// Add adds a word to this trie +func (t *Trie) Add(v string) { + root := t + for _, b := range []byte(v) { + child, exists := root.children[b] + if !exists { + child = &Trie{ + children: make(map[byte]*Trie), + wordTail: false, + } + root.children[b] = child + } + root = child + } + root.wordTail = true + root.word = v +} + +// HasPrefix returns true of v has any of the prefixes stored in this trie. +func (t *Trie) HasPrefix(v string) bool { + _, has := t.GetPrefix(v) + return has +} + +// GetPrefix is like HasPrefix but return the prefix in case of match or empty string otherwise. +func (t *Trie) GetPrefix(v string) (string, bool) { + root := t + if root.wordTail { + return root.word, true + } + for _, b := range []byte(v) { + child, exists := root.children[b] + if !exists { + return "", false + } + if child.wordTail { + return child.word, true + } + root = child + } + return "", false +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go new file mode 100644 index 0000000000..1eb674eea0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "reflect" + "strings" +) + +// [DEPRECATED] ToCanonicalName converts Golang package/type canonical name into REST friendly OpenAPI name. +// This method is deprecated because it has a misleading name. Please use ToRESTFriendlyName +// instead +// +// NOTE: actually the "canonical name" in this method should be named "REST friendly OpenAPI name", +// which is different from "canonical name" defined in GetCanonicalTypeName. The "canonical name" defined +// in GetCanonicalTypeName means Go type names with full package path. +// +// Examples of REST friendly OpenAPI name: +// Input: k8s.io/api/core/v1.Pod +// Output: io.k8s.api.core.v1.Pod +// +// Input: k8s.io/api/core/v1 +// Output: io.k8s.api.core.v1 +// +// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo +func ToCanonicalName(name string) string { + return ToRESTFriendlyName(name) +} + +// ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name. +// +// Examples of REST friendly OpenAPI name: +// Input: k8s.io/api/core/v1.Pod +// Output: io.k8s.api.core.v1.Pod +// +// Input: k8s.io/api/core/v1 +// Output: io.k8s.api.core.v1 +// +// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo +func ToRESTFriendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} + +// OpenAPICanonicalTypeNamer is an interface for models without Go type to seed model name. +// +// OpenAPI canonical names are Go type names with full package path, for uniquely indentifying +// a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/" +// should be used. For custom resource definition (CRD), the canonical name is expected to be +// group/version.kind +// +// Examples of canonical name: +// Go type: k8s.io/kubernetes/pkg/apis/core.Pod +// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// +// Example for vendored Go type: +// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod +// Canonical name: k8s.io/api/core/v1.Pod +type OpenAPICanonicalTypeNamer interface { + OpenAPICanonicalTypeName() string +} + +// GetCanonicalTypeName will find the canonical type name of a sample object, removing +// the "vendor" part of the path +func GetCanonicalTypeName(model interface{}) string { + if namer, ok := model.(OpenAPICanonicalTypeNamer); ok { + return namer.OpenAPICanonicalTypeName() + } + t := reflect.TypeOf(model) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.PkgPath() == "" { + return t.Name() + } + path := t.PkgPath() + if strings.Contains(path, "/vendor/") { + path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] + } + return path + "." + t.Name() +} diff --git a/vendor/k8s.io/utils/net/ipnet.go b/vendor/k8s.io/utils/net/ipnet.go new file mode 100644 index 0000000000..c2e844bf5d --- /dev/null +++ b/vendor/k8s.io/utils/net/ipnet.go @@ -0,0 +1,221 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" + "strings" +) + +// IPNetSet maps string to net.IPNet. +type IPNetSet map[string]*net.IPNet + +// ParseIPNets parses string slice to IPNetSet. +func ParseIPNets(specs ...string) (IPNetSet, error) { + ipnetset := make(IPNetSet) + for _, spec := range specs { + spec = strings.TrimSpace(spec) + _, ipnet, err := net.ParseCIDR(spec) + if err != nil { + return nil, err + } + k := ipnet.String() // In case of normalization + ipnetset[k] = ipnet + } + return ipnetset, nil +} + +// Insert adds items to the set. +func (s IPNetSet) Insert(items ...*net.IPNet) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. +func (s IPNetSet) Delete(items ...*net.IPNet) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPNetSet) Has(item *net.IPNet) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s IPNetSet) HasAll(items ...*net.IPNet) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPNetSet) Difference(s2 IPNetSet) IPNetSet { + result := make(IPNetSet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPNetSet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPNetSet) IsSuperset(s2 IPNetSet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s IPNetSet) Equal(s2 IPNetSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. +func (s IPNetSet) Len() int { + return len(s) +} + +// IPSet maps string to net.IP +type IPSet map[string]net.IP + +// ParseIPSet parses string slice to IPSet +func ParseIPSet(items ...string) (IPSet, error) { + ipset := make(IPSet) + for _, item := range items { + ip := net.ParseIP(strings.TrimSpace(item)) + if ip == nil { + return nil, fmt.Errorf("error parsing IP %q", item) + } + + ipset[ip.String()] = ip + } + + return ipset, nil +} + +// Insert adds items to the set. +func (s IPSet) Insert(items ...net.IP) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. 
+func (s IPSet) Delete(items ...net.IP) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPSet) Has(item net.IP) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s IPSet) HasAll(items ...net.IP) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPSet) Difference(s2 IPSet) IPSet { + result := make(IPSet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPSet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPSet) IsSuperset(s2 IPSet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s IPSet) Equal(s2 IPSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. +func (s IPSet) Len() int { + return len(s) +} diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go new file mode 100644 index 0000000000..077e447276 --- /dev/null +++ b/vendor/k8s.io/utils/net/net.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "errors" + "fmt" + "math" + "math/big" + "net" + "strconv" +) + +// ParseCIDRs parses a list of cidrs and return error if any is invalid. 
+// order is maintained +func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) { + cidrs := make([]*net.IPNet, 0, len(cidrsString)) + for _, cidrString := range cidrsString { + _, cidr, err := net.ParseCIDR(cidrString) + if err != nil { + return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err) + } + cidrs = append(cidrs, cidr) + } + return cidrs, nil +} + +// IsDualStackIPs returns if a slice of ips is: +// - all are valid ips +// - at least one ip from each family (v4 or v6) +func IsDualStackIPs(ips []net.IP) (bool, error) { + v4Found := false + v6Found := false + for _, ip := range ips { + if ip == nil { + return false, fmt.Errorf("ip %v is invalid", ip) + } + + if v4Found && v6Found { + continue + } + + if IsIPv6(ip) { + v6Found = true + continue + } + + v4Found = true + } + + return (v4Found && v6Found), nil +} + +// IsDualStackIPStrings returns if +// - all are valid ips +// - at least one ip from each family (v4 or v6) +func IsDualStackIPStrings(ips []string) (bool, error) { + parsedIPs := make([]net.IP, 0, len(ips)) + for _, ip := range ips { + parsedIP := net.ParseIP(ip) + parsedIPs = append(parsedIPs, parsedIP) + } + return IsDualStackIPs(parsedIPs) +} + +// IsDualStackCIDRs returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { + v4Found := false + v6Found := false + for _, cidr := range cidrs { + if cidr == nil { + return false, fmt.Errorf("cidr %v is invalid", cidr) + } + + if v4Found && v6Found { + continue + } + + if IsIPv6(cidr.IP) { + v6Found = true + continue + } + v4Found = true + } + + return v4Found && v6Found, nil +} + +// IsDualStackCIDRStrings returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func IsDualStackCIDRStrings(cidrs []string) (bool, error) { + parsedCIDRs, err := ParseCIDRs(cidrs) + if err != nil { + return false, err + } + return IsDualStackCIDRs(parsedCIDRs) +} + +// IsIPv6 returns if netIP is IPv6. +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +// IsIPv6String returns if ip is IPv6. +func IsIPv6String(ip string) bool { + netIP := net.ParseIP(ip) + return IsIPv6(netIP) +} + +// IsIPv6CIDRString returns if cidr is IPv6. +// This assumes cidr is a valid CIDR. +func IsIPv6CIDRString(cidr string) bool { + ip, _, _ := net.ParseCIDR(cidr) + return IsIPv6(ip) +} + +// IsIPv6CIDR returns if a cidr is ipv6 +func IsIPv6CIDR(cidr *net.IPNet) bool { + ip := cidr.IP + return IsIPv6(ip) +} + +// IsIPv4 returns if netIP is IPv4. +func IsIPv4(netIP net.IP) bool { + return netIP != nil && netIP.To4() != nil +} + +// IsIPv4String returns if ip is IPv4. +func IsIPv4String(ip string) bool { + netIP := net.ParseIP(ip) + return IsIPv4(netIP) +} + +// IsIPv4CIDR returns if a cidr is ipv4 +func IsIPv4CIDR(cidr *net.IPNet) bool { + ip := cidr.IP + return IsIPv4(ip) +} + +// IsIPv4CIDRString returns if cidr is IPv4. +// This assumes cidr is a valid CIDR. +func IsIPv4CIDRString(cidr string) bool { + ip, _, _ := net.ParseCIDR(cidr) + return IsIPv4(ip) +} + +// ParsePort parses a string representing an IP port. If the string is not a +// valid port number, this returns an error. 
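+// For example (editor's illustration, not part of the upstream file):
+// ParsePort("443", false) returns 443, while ParsePort("0", false) and
+// ParsePort("70000", true) both return an error (zero disallowed, and value
+// outside the uint16 range, respectively).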
+func ParsePort(port string, allowZero bool) (int, error) { + portInt, err := strconv.ParseUint(port, 10, 16) + if err != nil { + return 0, err + } + if portInt == 0 && !allowZero { + return 0, errors.New("0 is not a valid port number") + } + return int(portInt), nil +} + +// BigForIP creates a big.Int based on the provided net.IP +func BigForIP(ip net.IP) *big.Int { + // NOTE: Convert to 16-byte representation so we can + // handle v4 and v6 values the same way. + return big.NewInt(0).SetBytes(ip.To16()) +} + +// AddIPOffset adds the provided integer offset to a base big.Int representing a net.IP +// NOTE: If you started with a v4 address and overflow it, you get a v6 result. +func AddIPOffset(base *big.Int, offset int) net.IP { + r := big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes() + r = append(make([]byte, 16), r...) + return net.IP(r[len(r)-16:]) +} + +// RangeSize returns the size of a range in valid addresses. +// returns the size of the subnet (or math.MaxInt64 if the range size would overflow int64) +func RangeSize(subnet *net.IPNet) int64 { + ones, bits := subnet.Mask.Size() + if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { + return 0 + } + // this checks that we are not overflowing an int64 + if bits-ones >= 63 { + return math.MaxInt64 + } + return int64(1) << uint(bits-ones) +} + +// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space. +func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) { + ip := AddIPOffset(BigForIP(subnet.IP), index) + if !subnet.Contains(ip) { + return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. subnet: %q", index, subnet) + } + return ip, nil +} diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go new file mode 100644 index 0000000000..b4ff128e0d --- /dev/null +++ b/vendor/k8s.io/utils/net/port.go @@ -0,0 +1,137 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// IPFamily refers to a specific family if not empty, i.e. "4" or "6". +type IPFamily string + +// Constants for valid IPFamilys: +const ( + IPv4 IPFamily = "4" + IPv6 = "6" +) + +// Protocol is a network protocol support by LocalPort. +type Protocol string + +// Constants for valid protocols: +const ( + TCP Protocol = "TCP" + UDP Protocol = "UDP" +) + +// LocalPort represents an IP address and port pair along with a protocol +// and potentially a specific IP family. +// A LocalPort can be opened and subsequently closed. +type LocalPort struct { + // Description is an arbitrary string. + Description string + // IP is the IP address part of a given local port. + // If this string is empty, the port binds to all local IP addresses. + IP string + // If IPFamily is not empty, the port binds only to addresses of this + // family. + // IF empty along with IP, bind to local addresses of any family. + IPFamily IPFamily + // Port is the port number. 
+ // A value of 0 causes a port to be automatically chosen. + Port int + // Protocol is the protocol, e.g. TCP + Protocol Protocol +} + +// NewLocalPort returns a LocalPort instance and ensures IPFamily and IP are +// consistent and that the given protocol is valid. +func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protocol) (*LocalPort, error) { + if protocol != TCP && protocol != UDP { + return nil, fmt.Errorf("Unsupported protocol %s", protocol) + } + if ipFamily != "" && ipFamily != "4" && ipFamily != "6" { + return nil, fmt.Errorf("Invalid IP family %s", ipFamily) + } + if ip != "" { + parsedIP := net.ParseIP(ip) + if parsedIP == nil { + return nil, fmt.Errorf("invalid ip address %s", ip) + } + asIPv4 := parsedIP.To4() + if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 { + return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily) + } + } + return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil +} + +func (lp *LocalPort) String() string { + ipPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)) + return fmt.Sprintf("%q (%s/%s%s)", lp.Description, ipPort, strings.ToLower(string(lp.Protocol)), lp.IPFamily) +} + +// Closeable closes an opened LocalPort. +type Closeable interface { + Close() error +} + +// PortOpener can open a LocalPort and allows later closing it. +type PortOpener interface { + OpenLocalPort(lp *LocalPort) (Closeable, error) +} + +type listenPortOpener struct{} + +// ListenPortOpener opens ports by calling bind() and listen(). +var ListenPortOpener listenPortOpener + +// OpenLocalPort holds the given local port open. +func (l *listenPortOpener) OpenLocalPort(lp *LocalPort) (Closeable, error) { + return openLocalPort(lp) +} + +func openLocalPort(lp *LocalPort) (Closeable, error) { + var socket Closeable + hostPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)) + switch lp.Protocol { + case TCP: + network := "tcp" + string(lp.IPFamily) + listener, err := net.Listen(network, hostPort) + if err != nil { + return nil, err + } + socket = listener + case UDP: + network := "udp" + string(lp.IPFamily) + addr, err := net.ResolveUDPAddr(network, hostPort) + if err != nil { + return nil, err + } + conn, err := net.ListenUDP(network, addr) + if err != nil { + return nil, err + } + socket = conn + default: + return nil, fmt.Errorf("unknown protocol %q", lp.Protocol) + } + return socket, nil +} diff --git a/vendor/k8s.io/utils/path/file.go b/vendor/k8s.io/utils/path/file.go new file mode 100644 index 0000000000..319914641a --- /dev/null +++ b/vendor/k8s.io/utils/path/file.go @@ -0,0 +1,78 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path + +import ( + "errors" + "os" +) + +// LinkTreatment is the base type for constants used by Exists that indicate +// how symlinks are treated for existence checks. 
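+//
+// For example (editor's illustration, not part of the upstream file): with a dangling
+// symlink at /tmp/link, Exists(CheckSymlinkOnly, "/tmp/link") reports true because only
+// the link itself is checked, while Exists(CheckFollowSymlink, "/tmp/link") reports
+// false because the link target does not exist.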
+type LinkTreatment int + +const ( + // CheckFollowSymlink follows the symlink and verifies that the target of + // the symlink exists. + CheckFollowSymlink LinkTreatment = iota + + // CheckSymlinkOnly does not follow the symlink and verifies only that they + // symlink itself exists. + CheckSymlinkOnly +) + +// ErrInvalidLinkTreatment indicates that the link treatment behavior requested +// is not a valid behavior. +var ErrInvalidLinkTreatment = errors.New("unknown link behavior") + +// Exists checks if specified file, directory, or symlink exists. The behavior +// of the test depends on the linkBehaviour argument. See LinkTreatment for +// more details. +func Exists(linkBehavior LinkTreatment, filename string) (bool, error) { + var err error + + if linkBehavior == CheckFollowSymlink { + _, err = os.Stat(filename) + } else if linkBehavior == CheckSymlinkOnly { + _, err = os.Lstat(filename) + } else { + return false, ErrInvalidLinkTreatment + } + + if os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// ReadDirNoStat returns a string of files/directories contained +// in dirname without calling lstat on them. +func ReadDirNoStat(dirname string) ([]string, error) { + if dirname == "" { + dirname = "." + } + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + return f.Readdirnames(-1) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fc244d3f58..2cd6722c17 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -13,6 +13,8 @@ github.com/Azure/go-autorest/autorest/date github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 github.com/Azure/go-autorest/tracing +# github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 +github.com/NYTimes/gziphandler # github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 @@ -27,6 +29,13 @@ github.com/cespare/xxhash/v2 ## explicit github.com/client9/misspell github.com/client9/misspell/cmd/misspell +# github.com/coreos/go-semver v0.3.0 +github.com/coreos/go-semver/semver +# github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e +github.com/coreos/go-systemd/daemon +github.com/coreos/go-systemd/journal +# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f +github.com/coreos/pkg/capnslog # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew # github.com/emicklei/go-restful v2.9.5+incompatible @@ -48,7 +57,9 @@ github.com/go-openapi/spec github.com/go-openapi/swag # github.com/gogo/protobuf v1.3.2 ## explicit +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e github.com/golang/groupcache/lru @@ -66,10 +77,14 @@ github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.1.0 github.com/google/gofuzz +# github.com/google/uuid v1.1.2 +github.com/google/uuid # github.com/googleapis/gnostic v0.4.1 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/openapiv2 +# github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 +github.com/grpc-ecosystem/go-grpc-prometheus # github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -89,12 +104,16 @@ 
github.com/matttproud/golang_protobuf_extensions/pbutil github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 +# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 +github.com/munnerz/goautoneg # github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus/testutil +github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model/go # github.com/prometheus/common v0.10.0 @@ -111,6 +130,29 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit github.com/spf13/pflag +# go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 +go.etcd.io/etcd/auth/authpb +go.etcd.io/etcd/clientv3 +go.etcd.io/etcd/clientv3/balancer +go.etcd.io/etcd/clientv3/balancer/connectivity +go.etcd.io/etcd/clientv3/balancer/picker +go.etcd.io/etcd/clientv3/balancer/resolver/endpoint +go.etcd.io/etcd/clientv3/credentials +go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes +go.etcd.io/etcd/etcdserver/etcdserverpb +go.etcd.io/etcd/mvcc/mvccpb +go.etcd.io/etcd/pkg/fileutil +go.etcd.io/etcd/pkg/logutil +go.etcd.io/etcd/pkg/systemd +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/pkg/types +go.etcd.io/etcd/raft +go.etcd.io/etcd/raft/confchange +go.etcd.io/etcd/raft/quorum +go.etcd.io/etcd/raft/raftpb +go.etcd.io/etcd/raft/tracker +go.etcd.io/etcd/version # go.uber.org/atomic v1.4.0 go.uber.org/atomic # go.uber.org/multierr v1.1.0 @@ -123,8 +165,14 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/internal/subtle +golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 +golang.org/x/crypto/poly1305 +golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/ssh/terminal # golang.org/x/mod v0.3.0 golang.org/x/mod/module @@ -136,13 +184,19 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/timeseries +golang.org/x/net/trace +golang.org/x/net/websocket # golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt +# golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 +golang.org/x/sync/singleflight # golang.org/x/sys v0.0.0-20201112073958-5cba982894dd +golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows @@ -179,6 +233,47 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch +# google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.27.1 +google.golang.org/grpc +google.golang.org/grpc/attributes +google.golang.org/grpc/backoff +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/codes 
+google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancerload +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/buffer +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/transport +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata +google.golang.org/grpc/naming +google.golang.org/grpc/peer +google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/passthrough +google.golang.org/grpc/serviceconfig +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap # google.golang.org/protobuf v1.25.0 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -211,10 +306,14 @@ google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 +# gopkg.in/natefinch/lumberjack.v2 v2.0.0 +gopkg.in/natefinch/lumberjack.v2 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 # k8s.io/api v0.20.0 ## explicit +k8s.io/api/admission/v1 +k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apiserverinternal/v1alpha1 @@ -260,13 +359,20 @@ k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 # k8s.io/apimachinery v0.20.0 ## explicit +k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/api/validation +k8s.io/apimachinery/pkg/api/validation/path k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme +k8s.io/apimachinery/pkg/apis/meta/internalversion/validation k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/apis/meta/v1beta1 +k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields @@ -291,12 +397,15 @@ k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch +k8s.io/apimachinery/pkg/util/uuid k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/util/waitgroup k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch @@ -304,7 +413,129 @@ k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/apiserver v0.20.0 ## explicit +k8s.io/apiserver/pkg/admission +k8s.io/apiserver/pkg/admission/configuration +k8s.io/apiserver/pkg/admission/initializer 
+k8s.io/apiserver/pkg/admission/metrics +k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle +k8s.io/apiserver/pkg/admission/plugin/webhook +k8s.io/apiserver/pkg/admission/plugin/webhook/config +k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1 +k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1 +k8s.io/apiserver/pkg/admission/plugin/webhook/errors +k8s.io/apiserver/pkg/admission/plugin/webhook/generic +k8s.io/apiserver/pkg/admission/plugin/webhook/mutating +k8s.io/apiserver/pkg/admission/plugin/webhook/namespace +k8s.io/apiserver/pkg/admission/plugin/webhook/object +k8s.io/apiserver/pkg/admission/plugin/webhook/request +k8s.io/apiserver/pkg/admission/plugin/webhook/rules +k8s.io/apiserver/pkg/admission/plugin/webhook/validating +k8s.io/apiserver/pkg/apis/apiserver +k8s.io/apiserver/pkg/apis/apiserver/install +k8s.io/apiserver/pkg/apis/apiserver/v1 +k8s.io/apiserver/pkg/apis/apiserver/v1alpha1 +k8s.io/apiserver/pkg/apis/apiserver/v1beta1 +k8s.io/apiserver/pkg/apis/audit +k8s.io/apiserver/pkg/apis/audit/install +k8s.io/apiserver/pkg/apis/audit/v1 +k8s.io/apiserver/pkg/apis/audit/v1alpha1 +k8s.io/apiserver/pkg/apis/audit/v1beta1 +k8s.io/apiserver/pkg/apis/audit/validation +k8s.io/apiserver/pkg/apis/config +k8s.io/apiserver/pkg/apis/config/v1 +k8s.io/apiserver/pkg/apis/config/validation +k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap +k8s.io/apiserver/pkg/audit +k8s.io/apiserver/pkg/audit/policy +k8s.io/apiserver/pkg/authentication/authenticator +k8s.io/apiserver/pkg/authentication/authenticatorfactory +k8s.io/apiserver/pkg/authentication/group +k8s.io/apiserver/pkg/authentication/request/anonymous +k8s.io/apiserver/pkg/authentication/request/bearertoken +k8s.io/apiserver/pkg/authentication/request/headerrequest +k8s.io/apiserver/pkg/authentication/request/union +k8s.io/apiserver/pkg/authentication/request/websocket +k8s.io/apiserver/pkg/authentication/request/x509 +k8s.io/apiserver/pkg/authentication/serviceaccount +k8s.io/apiserver/pkg/authentication/token/cache +k8s.io/apiserver/pkg/authentication/token/tokenfile +k8s.io/apiserver/pkg/authentication/user +k8s.io/apiserver/pkg/authorization/authorizer +k8s.io/apiserver/pkg/authorization/authorizerfactory +k8s.io/apiserver/pkg/authorization/path +k8s.io/apiserver/pkg/authorization/union +k8s.io/apiserver/pkg/endpoints +k8s.io/apiserver/pkg/endpoints/deprecation +k8s.io/apiserver/pkg/endpoints/discovery +k8s.io/apiserver/pkg/endpoints/filterlatency +k8s.io/apiserver/pkg/endpoints/filters +k8s.io/apiserver/pkg/endpoints/handlers +k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager +k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal +k8s.io/apiserver/pkg/endpoints/handlers/negotiation +k8s.io/apiserver/pkg/endpoints/handlers/responsewriters +k8s.io/apiserver/pkg/endpoints/metrics +k8s.io/apiserver/pkg/endpoints/openapi +k8s.io/apiserver/pkg/endpoints/request +k8s.io/apiserver/pkg/endpoints/warning +k8s.io/apiserver/pkg/features +k8s.io/apiserver/pkg/quota/v1 +k8s.io/apiserver/pkg/registry/generic +k8s.io/apiserver/pkg/registry/generic/registry +k8s.io/apiserver/pkg/registry/rest +k8s.io/apiserver/pkg/server +k8s.io/apiserver/pkg/server/dynamiccertificates +k8s.io/apiserver/pkg/server/egressselector +k8s.io/apiserver/pkg/server/egressselector/metrics +k8s.io/apiserver/pkg/server/filters +k8s.io/apiserver/pkg/server/healthz +k8s.io/apiserver/pkg/server/httplog +k8s.io/apiserver/pkg/server/mux 
+k8s.io/apiserver/pkg/server/options +k8s.io/apiserver/pkg/server/options/encryptionconfig +k8s.io/apiserver/pkg/server/resourceconfig +k8s.io/apiserver/pkg/server/routes +k8s.io/apiserver/pkg/server/storage +k8s.io/apiserver/pkg/storage +k8s.io/apiserver/pkg/storage/cacher +k8s.io/apiserver/pkg/storage/errors +k8s.io/apiserver/pkg/storage/etcd3 +k8s.io/apiserver/pkg/storage/etcd3/metrics +k8s.io/apiserver/pkg/storage/names +k8s.io/apiserver/pkg/storage/storagebackend +k8s.io/apiserver/pkg/storage/storagebackend/factory +k8s.io/apiserver/pkg/storage/value +k8s.io/apiserver/pkg/storage/value/encrypt/aes +k8s.io/apiserver/pkg/storage/value/encrypt/envelope +k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1 +k8s.io/apiserver/pkg/storage/value/encrypt/identity +k8s.io/apiserver/pkg/storage/value/encrypt/secretbox +k8s.io/apiserver/pkg/storageversion +k8s.io/apiserver/pkg/util/apihelpers +k8s.io/apiserver/pkg/util/dryrun k8s.io/apiserver/pkg/util/feature +k8s.io/apiserver/pkg/util/flowcontrol +k8s.io/apiserver/pkg/util/flowcontrol/counter +k8s.io/apiserver/pkg/util/flowcontrol/debug +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset +k8s.io/apiserver/pkg/util/flowcontrol/format +k8s.io/apiserver/pkg/util/flowcontrol/metrics +k8s.io/apiserver/pkg/util/flushwriter +k8s.io/apiserver/pkg/util/openapi +k8s.io/apiserver/pkg/util/shufflesharding +k8s.io/apiserver/pkg/util/webhook +k8s.io/apiserver/pkg/util/wsstream +k8s.io/apiserver/pkg/warning +k8s.io/apiserver/plugin/pkg/audit/buffered +k8s.io/apiserver/plugin/pkg/audit/log +k8s.io/apiserver/plugin/pkg/audit/truncate +k8s.io/apiserver/plugin/pkg/audit/webhook +k8s.io/apiserver/plugin/pkg/authenticator/token/webhook +k8s.io/apiserver/plugin/pkg/authorizer/webhook # k8s.io/client-go v0.20.0 ## explicit k8s.io/client-go/discovery @@ -517,6 +748,7 @@ k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 +k8s.io/client-go/tools/events k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager k8s.io/client-go/tools/record @@ -576,6 +808,8 @@ k8s.io/component-base/logs/json k8s.io/component-base/logs/sanitization k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry +k8s.io/component-base/metrics/prometheus/workqueue +k8s.io/component-base/metrics/testutil k8s.io/component-base/version # k8s.io/component-helpers v0.20.0 ## explicit @@ -597,16 +831,29 @@ k8s.io/gengo/types k8s.io/klog/v2 # k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd k8s.io/kube-openapi/cmd/openapi-gen/args +k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/generators k8s.io/kube-openapi/pkg/generators/rules +k8s.io/kube-openapi/pkg/handler +k8s.io/kube-openapi/pkg/schemaconv +k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets # k8s.io/utils v0.0.0-20201110183641-67b214c5f920 k8s.io/utils/buffer k8s.io/utils/integer +k8s.io/utils/net +k8s.io/utils/path k8s.io/utils/trace +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14 +sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client +sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client # sigs.k8s.io/structured-merge-diff/v4 v4.0.2 +sigs.k8s.io/structured-merge-diff/v4/fieldpath 
+sigs.k8s.io/structured-merge-diff/v4/merge +sigs.k8s.io/structured-merge-diff/v4/schema +sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/LICENSE b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go new file mode 100644 index 0000000000..3ccec331a1 --- /dev/null +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go @@ -0,0 +1,205 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "errors" + "io" + "math/rand" + "net" + "sync" + "time" + + "google.golang.org/grpc" + "k8s.io/klog/v2" + "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client" +) + +// Tunnel provides ability to dial a connection through a tunnel. +type Tunnel interface { + // Dial connects to the address on the named network, similar to + // what net.Dial does. The only supported protocol is tcp. + Dial(protocol, address string) (net.Conn, error) +} + +type dialResult struct { + err string + connid int64 +} + +// grpcTunnel implements Tunnel +type grpcTunnel struct { + stream client.ProxyService_ProxyClient + pendingDial map[int64]chan<- dialResult + conns map[int64]*conn + pendingDialLock sync.RWMutex + connsLock sync.RWMutex +} + +type clientConn interface { + Close() error +} + +var _ clientConn = &grpc.ClientConn{} + +// CreateSingleUseGrpcTunnel creates a Tunnel to dial to a remote server through a +// gRPC based proxy service. +// Currently, a single tunnel supports a single connection, and the tunnel is closed when the connection is terminated +// The Dial() method of the returned tunnel should only be called once +func CreateSingleUseGrpcTunnel(address string, opts ...grpc.DialOption) (Tunnel, error) { + c, err := grpc.Dial(address, opts...) 
+ if err != nil { + return nil, err + } + + grpcClient := client.NewProxyServiceClient(c) + + stream, err := grpcClient.Proxy(context.Background()) + if err != nil { + return nil, err + } + + tunnel := &grpcTunnel{ + stream: stream, + pendingDial: make(map[int64]chan<- dialResult), + conns: make(map[int64]*conn), + } + + go tunnel.serve(c) + + return tunnel, nil +} + +func (t *grpcTunnel) serve(c clientConn) { + defer c.Close() + + for { + pkt, err := t.stream.Recv() + if err == io.EOF { + return + } + if err != nil || pkt == nil { + klog.ErrorS(err, "stream read failure") + return + } + + klog.V(5).InfoS("[tracing] recv packet", "type", pkt.Type) + + switch pkt.Type { + case client.PacketType_DIAL_RSP: + resp := pkt.GetDialResponse() + t.pendingDialLock.RLock() + ch, ok := t.pendingDial[resp.Random] + t.pendingDialLock.RUnlock() + + if !ok { + klog.V(1).Infoln("DialResp not recognized; dropped") + } else { + ch <- dialResult{ + err: resp.Error, + connid: resp.ConnectID, + } + } + case client.PacketType_DATA: + resp := pkt.GetData() + // TODO: flow control + t.connsLock.RLock() + conn, ok := t.conns[resp.ConnectID] + t.connsLock.RUnlock() + + if ok { + conn.readCh <- resp.Data + } else { + klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID) + } + case client.PacketType_CLOSE_RSP: + resp := pkt.GetCloseResponse() + t.connsLock.RLock() + conn, ok := t.conns[resp.ConnectID] + t.connsLock.RUnlock() + + if ok { + close(conn.readCh) + conn.closeCh <- resp.Error + close(conn.closeCh) + t.connsLock.Lock() + delete(t.conns, resp.ConnectID) + t.connsLock.Unlock() + return + } + klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID) + } + } +} + +// Dial connects to the address on the named network, similar to +// what net.Dial does. The only supported protocol is tcp. +func (t *grpcTunnel) Dial(protocol, address string) (net.Conn, error) { + if protocol != "tcp" { + return nil, errors.New("protocol not supported") + } + + random := rand.Int63() + resCh := make(chan dialResult) + t.pendingDialLock.Lock() + t.pendingDial[random] = resCh + t.pendingDialLock.Unlock() + defer func() { + t.pendingDialLock.Lock() + delete(t.pendingDial, random) + t.pendingDialLock.Unlock() + }() + + req := &client.Packet{ + Type: client.PacketType_DIAL_REQ, + Payload: &client.Packet_DialRequest{ + DialRequest: &client.DialRequest{ + Protocol: protocol, + Address: address, + Random: random, + }, + }, + } + klog.V(5).InfoS("[tracing] send packet", "type", req.Type) + + err := t.stream.Send(req) + if err != nil { + return nil, err + } + + klog.V(5).Infoln("DIAL_REQ sent to proxy server") + + c := &conn{stream: t.stream} + + select { + case res := <-resCh: + if res.err != "" { + return nil, errors.New(res.err) + } + c.connID = res.connid + c.readCh = make(chan []byte, 10) + c.closeCh = make(chan string) + t.connsLock.Lock() + t.conns[res.connid] = c + t.connsLock.Unlock() + case <-time.After(30 * time.Second): + return nil, errors.New("dial timeout") + } + + return c, nil +} diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go new file mode 100644 index 0000000000..4a93c69cf7 --- /dev/null +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "errors" + "io" + "net" + "time" + + "k8s.io/klog/v2" + "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client" +) + +// CloseTimeout is the timeout to wait CLOSE_RSP packet after a +// successful delivery of CLOSE_REQ. +const CloseTimeout = 10 * time.Second + +// conn is an implementation of net.Conn, where the data is transported +// over an established tunnel defined by a gRPC service ProxyService. +type conn struct { + stream client.ProxyService_ProxyClient + connID int64 + readCh chan []byte + closeCh chan string + rdata []byte +} + +var _ net.Conn = &conn{} + +// Write sends the data thru the connection over proxy service +func (c *conn) Write(data []byte) (n int, err error) { + req := &client.Packet{ + Type: client.PacketType_DATA, + Payload: &client.Packet_Data{ + Data: &client.Data{ + ConnectID: c.connID, + Data: data, + }, + }, + } + + klog.V(5).InfoS("[tracing] send req", "type", req.Type) + + err = c.stream.Send(req) + if err != nil { + return 0, err + } + return len(data), err +} + +// Read receives data from the connection over proxy service +func (c *conn) Read(b []byte) (n int, err error) { + var data []byte + + if c.rdata != nil { + data = c.rdata + } else { + data = <-c.readCh + } + + if data == nil { + return 0, io.EOF + } + + if len(data) > len(b) { + copy(b, data[:len(b)]) + c.rdata = data[len(b):] + return len(b), nil + } + + c.rdata = nil + copy(b, data) + + return len(data), nil +} + +func (c *conn) LocalAddr() net.Addr { + return nil +} + +func (c *conn) RemoteAddr() net.Addr { + return nil +} + +func (c *conn) SetDeadline(t time.Time) error { + return errors.New("not implemented") +} + +func (c *conn) SetReadDeadline(t time.Time) error { + return errors.New("not implemented") +} + +func (c *conn) SetWriteDeadline(t time.Time) error { + return errors.New("not implemented") +} + +// Close closes the connection. It also sends CLOSE_REQ packet over +// proxy service to notify remote to drop the connection. +func (c *conn) Close() error { + klog.V(4).Infoln("closing connection") + req := &client.Packet{ + Type: client.PacketType_CLOSE_REQ, + Payload: &client.Packet_CloseRequest{ + CloseRequest: &client.CloseRequest{ + ConnectID: c.connID, + }, + }, + } + + klog.V(5).InfoS("[tracing] send req", "type", req.Type) + + if err := c.stream.Send(req); err != nil { + return err + } + + select { + case errMsg := <-c.closeCh: + if errMsg != "" { + return errors.New(errMsg) + } + return nil + case <-time.After(CloseTimeout): + } + + return errors.New("close timeout") +} diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go new file mode 100644 index 0000000000..107f2e546f --- /dev/null +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go @@ -0,0 +1,653 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: konnectivity-client/proto/client/client.proto + +package client + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type PacketType int32 + +const ( + PacketType_DIAL_REQ PacketType = 0 + PacketType_DIAL_RSP PacketType = 1 + PacketType_CLOSE_REQ PacketType = 2 + PacketType_CLOSE_RSP PacketType = 3 + PacketType_DATA PacketType = 4 +) + +var PacketType_name = map[int32]string{ + 0: "DIAL_REQ", + 1: "DIAL_RSP", + 2: "CLOSE_REQ", + 3: "CLOSE_RSP", + 4: "DATA", +} + +var PacketType_value = map[string]int32{ + "DIAL_REQ": 0, + "DIAL_RSP": 1, + "CLOSE_REQ": 2, + "CLOSE_RSP": 3, + "DATA": 4, +} + +func (x PacketType) String() string { + return proto.EnumName(PacketType_name, int32(x)) +} + +func (PacketType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{0} +} + +type Error int32 + +const ( + Error_EOF Error = 0 +) + +var Error_name = map[int32]string{ + 0: "EOF", +} + +var Error_value = map[string]int32{ + "EOF": 0, +} + +func (x Error) String() string { + return proto.EnumName(Error_name, int32(x)) +} + +func (Error) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{1} +} + +type Packet struct { + Type PacketType `protobuf:"varint,1,opt,name=type,proto3,enum=PacketType" json:"type,omitempty"` + // Types that are valid to be assigned to Payload: + // *Packet_DialRequest + // *Packet_DialResponse + // *Packet_Data + // *Packet_CloseRequest + // *Packet_CloseResponse + Payload isPacket_Payload `protobuf_oneof:"payload"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Packet) Reset() { *m = Packet{} } +func (m *Packet) String() string { return proto.CompactTextString(m) } +func (*Packet) ProtoMessage() {} +func (*Packet) Descriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{0} +} + +func (m *Packet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Packet.Unmarshal(m, b) +} +func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Packet.Marshal(b, m, deterministic) +} +func (m *Packet) XXX_Merge(src proto.Message) { + xxx_messageInfo_Packet.Merge(m, src) +} +func (m *Packet) XXX_Size() int { + return xxx_messageInfo_Packet.Size(m) +} +func (m *Packet) 
XXX_DiscardUnknown() { + xxx_messageInfo_Packet.DiscardUnknown(m) +} + +var xxx_messageInfo_Packet proto.InternalMessageInfo + +func (m *Packet) GetType() PacketType { + if m != nil { + return m.Type + } + return PacketType_DIAL_REQ +} + +type isPacket_Payload interface { + isPacket_Payload() +} + +type Packet_DialRequest struct { + DialRequest *DialRequest `protobuf:"bytes,2,opt,name=dialRequest,proto3,oneof"` +} + +type Packet_DialResponse struct { + DialResponse *DialResponse `protobuf:"bytes,3,opt,name=dialResponse,proto3,oneof"` +} + +type Packet_Data struct { + Data *Data `protobuf:"bytes,4,opt,name=data,proto3,oneof"` +} + +type Packet_CloseRequest struct { + CloseRequest *CloseRequest `protobuf:"bytes,5,opt,name=closeRequest,proto3,oneof"` +} + +type Packet_CloseResponse struct { + CloseResponse *CloseResponse `protobuf:"bytes,6,opt,name=closeResponse,proto3,oneof"` +} + +func (*Packet_DialRequest) isPacket_Payload() {} + +func (*Packet_DialResponse) isPacket_Payload() {} + +func (*Packet_Data) isPacket_Payload() {} + +func (*Packet_CloseRequest) isPacket_Payload() {} + +func (*Packet_CloseResponse) isPacket_Payload() {} + +func (m *Packet) GetPayload() isPacket_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *Packet) GetDialRequest() *DialRequest { + if x, ok := m.GetPayload().(*Packet_DialRequest); ok { + return x.DialRequest + } + return nil +} + +func (m *Packet) GetDialResponse() *DialResponse { + if x, ok := m.GetPayload().(*Packet_DialResponse); ok { + return x.DialResponse + } + return nil +} + +func (m *Packet) GetData() *Data { + if x, ok := m.GetPayload().(*Packet_Data); ok { + return x.Data + } + return nil +} + +func (m *Packet) GetCloseRequest() *CloseRequest { + if x, ok := m.GetPayload().(*Packet_CloseRequest); ok { + return x.CloseRequest + } + return nil +} + +func (m *Packet) GetCloseResponse() *CloseResponse { + if x, ok := m.GetPayload().(*Packet_CloseResponse); ok { + return x.CloseResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Packet) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Packet_DialRequest)(nil), + (*Packet_DialResponse)(nil), + (*Packet_Data)(nil), + (*Packet_CloseRequest)(nil), + (*Packet_CloseResponse)(nil), + } +} + +type DialRequest struct { + // tcp or udp? 
+ Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"` + // node:port + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // random id for client, maybe should be longer + Random int64 `protobuf:"varint,3,opt,name=random,proto3" json:"random,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DialRequest) Reset() { *m = DialRequest{} } +func (m *DialRequest) String() string { return proto.CompactTextString(m) } +func (*DialRequest) ProtoMessage() {} +func (*DialRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{1} +} + +func (m *DialRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DialRequest.Unmarshal(m, b) +} +func (m *DialRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DialRequest.Marshal(b, m, deterministic) +} +func (m *DialRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DialRequest.Merge(m, src) +} +func (m *DialRequest) XXX_Size() int { + return xxx_messageInfo_DialRequest.Size(m) +} +func (m *DialRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DialRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DialRequest proto.InternalMessageInfo + +func (m *DialRequest) GetProtocol() string { + if m != nil { + return m.Protocol + } + return "" +} + +func (m *DialRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *DialRequest) GetRandom() int64 { + if m != nil { + return m.Random + } + return 0 +} + +type DialResponse struct { + // error failed reason; enum? + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // connectID indicates the identifier of the connection + ConnectID int64 `protobuf:"varint,2,opt,name=connectID,proto3" json:"connectID,omitempty"` + // random copied from DialRequest + Random int64 `protobuf:"varint,3,opt,name=random,proto3" json:"random,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DialResponse) Reset() { *m = DialResponse{} } +func (m *DialResponse) String() string { return proto.CompactTextString(m) } +func (*DialResponse) ProtoMessage() {} +func (*DialResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{2} +} + +func (m *DialResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DialResponse.Unmarshal(m, b) +} +func (m *DialResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DialResponse.Marshal(b, m, deterministic) +} +func (m *DialResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DialResponse.Merge(m, src) +} +func (m *DialResponse) XXX_Size() int { + return xxx_messageInfo_DialResponse.Size(m) +} +func (m *DialResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DialResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DialResponse proto.InternalMessageInfo + +func (m *DialResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *DialResponse) GetConnectID() int64 { + if m != nil { + return m.ConnectID + } + return 0 +} + +func (m *DialResponse) GetRandom() int64 { + if m != nil { + return m.Random + } + return 0 +} + +type CloseRequest struct { + // connectID of the stream to close + ConnectID int64 `protobuf:"varint,1,opt,name=connectID,proto3" json:"connectID,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{3} +} + +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (m *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(m, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +func (m *CloseRequest) GetConnectID() int64 { + if m != nil { + return m.ConnectID + } + return 0 +} + +type CloseResponse struct { + // error message + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // connectID indicates the identifier of the connection + ConnectID int64 `protobuf:"varint,2,opt,name=connectID,proto3" json:"connectID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseResponse) Reset() { *m = CloseResponse{} } +func (m *CloseResponse) String() string { return proto.CompactTextString(m) } +func (*CloseResponse) ProtoMessage() {} +func (*CloseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{4} +} + +func (m *CloseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseResponse.Unmarshal(m, b) +} +func (m *CloseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseResponse.Marshal(b, m, deterministic) +} +func (m *CloseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseResponse.Merge(m, src) +} +func (m *CloseResponse) XXX_Size() int { + return xxx_messageInfo_CloseResponse.Size(m) +} +func (m *CloseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CloseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseResponse proto.InternalMessageInfo + +func (m *CloseResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *CloseResponse) GetConnectID() int64 { + if m != nil { + return m.ConnectID + } + return 0 +} + +type Data struct { + // connectID to connect to + ConnectID int64 `protobuf:"varint,1,opt,name=connectID,proto3" json:"connectID,omitempty"` + // error message if error happens + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + // stream data + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Data) Reset() { *m = Data{} } +func (m *Data) String() string { return proto.CompactTextString(m) } +func (*Data) ProtoMessage() {} +func (*Data) Descriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{5} +} + +func (m *Data) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Data.Unmarshal(m, b) +} +func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_Data.Marshal(b, m, deterministic) +} +func (m *Data) XXX_Merge(src proto.Message) { + xxx_messageInfo_Data.Merge(m, src) +} +func (m *Data) XXX_Size() int { + return xxx_messageInfo_Data.Size(m) +} +func (m *Data) XXX_DiscardUnknown() { + xxx_messageInfo_Data.DiscardUnknown(m) +} + +var xxx_messageInfo_Data proto.InternalMessageInfo + +func (m *Data) GetConnectID() int64 { + if m != nil { + return m.ConnectID + } + return 0 +} + +func (m *Data) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Data) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterEnum("PacketType", PacketType_name, PacketType_value) + proto.RegisterEnum("Error", Error_name, Error_value) + proto.RegisterType((*Packet)(nil), "Packet") + proto.RegisterType((*DialRequest)(nil), "DialRequest") + proto.RegisterType((*DialResponse)(nil), "DialResponse") + proto.RegisterType((*CloseRequest)(nil), "CloseRequest") + proto.RegisterType((*CloseResponse)(nil), "CloseResponse") + proto.RegisterType((*Data)(nil), "Data") +} + +func init() { + proto.RegisterFile("konnectivity-client/proto/client/client.proto", fileDescriptor_fec4258d9ecd175d) +} + +var fileDescriptor_fec4258d9ecd175d = []byte{ + // 472 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xd1, 0x6e, 0x9b, 0x30, + 0x14, 0x85, 0x00, 0x49, 0xb8, 0x21, 0x15, 0xb2, 0xa6, 0x09, 0x75, 0x93, 0x5a, 0xf1, 0x14, 0x55, + 0x0b, 0x54, 0xa9, 0x34, 0xed, 0x35, 0x0d, 0xa9, 0x52, 0xa9, 0x5a, 0x99, 0xd3, 0xa7, 0xee, 0x61, + 0xf2, 0xc0, 0x9a, 0x50, 0x18, 0x66, 0xb6, 0x97, 0x8d, 0x0f, 0xda, 0x7f, 0x4e, 0x18, 0x52, 0xc8, + 0xa4, 0x6d, 0x52, 0x9f, 0xe0, 0x1c, 0xdf, 0x73, 0x7c, 0x7d, 0xae, 0x0d, 0xf3, 0x1d, 0x2b, 0x0a, + 0x9a, 0xc8, 0x6c, 0x9f, 0xc9, 0x6a, 0x9e, 0xe4, 0x19, 0x2d, 0x64, 0x58, 0x72, 0x26, 0x59, 0xd8, + 0x82, 0xe6, 0x13, 0x28, 0xce, 0xff, 0x35, 0x80, 0x61, 0x4c, 0x92, 0x1d, 0x95, 0xe8, 0x0c, 0x4c, + 0x59, 0x95, 0xd4, 0xd3, 0xcf, 0xf5, 0xd9, 0xc9, 0x62, 0x12, 0x34, 0xf4, 0x43, 0x55, 0x52, 0xac, + 0x16, 0xd0, 0x25, 0x4c, 0xd2, 0x8c, 0xe4, 0x98, 0x7e, 0xfb, 0x4e, 0x85, 0xf4, 0x06, 0xe7, 0xfa, + 0x6c, 0xb2, 0x70, 0x82, 0xa8, 0xe3, 0x36, 0x1a, 0xee, 0x97, 0xa0, 0x2b, 0x70, 0x1a, 0x28, 0x4a, + 0x56, 0x08, 0xea, 0x19, 0x4a, 0x32, 0x6d, 0x25, 0x0d, 0xb9, 0xd1, 0xf0, 0x51, 0x11, 0x7a, 0x05, + 0x66, 0x4a, 0x24, 0xf1, 0x4c, 0x55, 0x6c, 0x05, 0x11, 0x91, 0x64, 0xa3, 0x61, 0x45, 0xd6, 0x8e, + 0x49, 0xce, 0x04, 0x3d, 0x34, 0x61, 0xb5, 0x8e, 0xab, 0x1e, 0x59, 0x3b, 0xf6, 0x8b, 0xd0, 0x5b, + 0x98, 0xb6, 0xb8, 0xed, 0x63, 0xa8, 0x54, 0x27, 0x07, 0xd5, 0x53, 0x23, 0xc7, 0x65, 0xd7, 0x36, + 0x8c, 0x4a, 0x52, 0xe5, 0x8c, 0xa4, 0xfe, 0x47, 0x98, 0xf4, 0xce, 0x89, 0x4e, 0x61, 0xac, 0xf2, + 0x4b, 0x58, 0xae, 0xf2, 0xb2, 0xf1, 0x13, 0x46, 0x1e, 0x8c, 0x48, 0x9a, 0x72, 0x2a, 0x84, 0x8a, + 0xc8, 0xc6, 0x07, 0x88, 0x5e, 0xc2, 0x90, 0x93, 0x22, 0x65, 0x5f, 0x55, 0x10, 0x06, 0x6e, 0x91, + 0xff, 0x08, 0x4e, 0x3f, 0x11, 0xf4, 0x02, 0x2c, 0xca, 0x39, 0xe3, 0xad, 0x75, 0x03, 0xd0, 0x6b, + 0xb0, 0x93, 0x66, 0xb6, 0xb7, 0x91, 0x72, 0x36, 0x70, 0x47, 0xfc, 0xd5, 0xfb, 0x0d, 0x38, 0xfd, + 0x6c, 0x8e, 0x5d, 0xf4, 0x3f, 0x5c, 0xfc, 0x15, 0x4c, 0x8f, 0x32, 0x79, 0x4e, 0x2b, 0xfe, 0x7b, + 0x30, 0xeb, 0x99, 0xfd, 0x7b, 0xab, 0xce, 0x79, 0xd0, 0x77, 0x46, 0xed, 0xf0, 0xeb, 0x43, 0x38, + 0xcd, 0xcc, 0x2f, 0x62, 0x80, 0xee, 0x2e, 0x22, 0x07, 0xc6, 0xd1, 0xed, 0xf2, 0xee, 0x13, 0x5e, + 0x7f, 0x70, 0xb5, 0x0e, 0x6d, 0x63, 0x57, 0x47, 0x53, 0xb0, 0x57, 0x77, 0xf7, 
0xdb, 0xb5, 0x5a, + 0x1c, 0xf4, 0xe0, 0x36, 0x76, 0x0d, 0x34, 0x06, 0x33, 0x5a, 0x3e, 0x2c, 0x5d, 0xf3, 0xc2, 0x05, + 0x6b, 0xad, 0xb6, 0x1b, 0x81, 0xb1, 0xbe, 0xbf, 0x71, 0xb5, 0x45, 0x08, 0x4e, 0xcc, 0xd9, 0xcf, + 0x6a, 0x4b, 0xf9, 0x3e, 0x4b, 0x28, 0x3a, 0x03, 0x4b, 0x61, 0x34, 0x6a, 0xdf, 0xc1, 0xe9, 0xe1, + 0xc7, 0xd7, 0x66, 0xfa, 0xa5, 0x7e, 0x7d, 0xf3, 0x18, 0x89, 0xec, 0x8b, 0x08, 0x76, 0xef, 0x44, + 0x90, 0xb1, 0x90, 0x94, 0x99, 0xa0, 0x7c, 0x4f, 0xf9, 0xbc, 0xa0, 0xf2, 0x07, 0xe3, 0xbb, 0x79, + 0x59, 0xcb, 0xc3, 0xff, 0xbd, 0xc6, 0xcf, 0x43, 0x85, 0xae, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, + 0x64, 0xe0, 0x62, 0xbe, 0xb8, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ProxyServiceClient is the client API for ProxyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProxyServiceClient interface { + Proxy(ctx context.Context, opts ...grpc.CallOption) (ProxyService_ProxyClient, error) +} + +type proxyServiceClient struct { + cc *grpc.ClientConn +} + +func NewProxyServiceClient(cc *grpc.ClientConn) ProxyServiceClient { + return &proxyServiceClient{cc} +} + +func (c *proxyServiceClient) Proxy(ctx context.Context, opts ...grpc.CallOption) (ProxyService_ProxyClient, error) { + stream, err := c.cc.NewStream(ctx, &_ProxyService_serviceDesc.Streams[0], "/ProxyService/Proxy", opts...) + if err != nil { + return nil, err + } + x := &proxyServiceProxyClient{stream} + return x, nil +} + +type ProxyService_ProxyClient interface { + Send(*Packet) error + Recv() (*Packet, error) + grpc.ClientStream +} + +type proxyServiceProxyClient struct { + grpc.ClientStream +} + +func (x *proxyServiceProxyClient) Send(m *Packet) error { + return x.ClientStream.SendMsg(m) +} + +func (x *proxyServiceProxyClient) Recv() (*Packet, error) { + m := new(Packet) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ProxyServiceServer is the server API for ProxyService service. +type ProxyServiceServer interface { + Proxy(ProxyService_ProxyServer) error +} + +// UnimplementedProxyServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedProxyServiceServer struct { +} + +func (*UnimplementedProxyServiceServer) Proxy(srv ProxyService_ProxyServer) error { + return status.Errorf(codes.Unimplemented, "method Proxy not implemented") +} + +func RegisterProxyServiceServer(s *grpc.Server, srv ProxyServiceServer) { + s.RegisterService(&_ProxyService_serviceDesc, srv) +} + +func _ProxyService_Proxy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ProxyServiceServer).Proxy(&proxyServiceProxyServer{stream}) +} + +type ProxyService_ProxyServer interface { + Send(*Packet) error + Recv() (*Packet, error) + grpc.ServerStream +} + +type proxyServiceProxyServer struct { + grpc.ServerStream +} + +func (x *proxyServiceProxyServer) Send(m *Packet) error { + return x.ServerStream.SendMsg(m) +} + +func (x *proxyServiceProxyServer) Recv() (*Packet, error) { + m := new(Packet) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _ProxyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ProxyService", + HandlerType: (*ProxyServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Proxy", + Handler: _ProxyService_Proxy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "konnectivity-client/proto/client/client.proto", +} diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto new file mode 100644 index 0000000000..3aadac0642 --- /dev/null +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto @@ -0,0 +1,95 @@ +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +// Retransmit? +// Sliding windows? + +option go_package = "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client"; + +service ProxyService { + rpc Proxy(stream Packet) returns (stream Packet) {} +} + +enum PacketType { + DIAL_REQ = 0; + DIAL_RSP = 1; + CLOSE_REQ = 2; + CLOSE_RSP = 3; + DATA = 4; +} + +enum Error { + EOF = 0; + // ... +} + +message Packet { + PacketType type = 1; + + oneof payload { + DialRequest dialRequest = 2; + DialResponse dialResponse = 3; + Data data = 4; + CloseRequest closeRequest = 5; + CloseResponse closeResponse = 6; + } +} + +message DialRequest { + // tcp or udp? + string protocol = 1; + + // node:port + string address = 2; + + // random id for client, maybe should be longer + int64 random = 3; +} + +message DialResponse { + // error failed reason; enum? 
+ string error = 1; + + // connectID indicates the identifier of the connection + int64 connectID = 2; + + // random copied from DialRequest + int64 random = 3; +} + +message CloseRequest { + // connectID of the stream to close + int64 connectID = 1; +} + +message CloseResponse { + // error message + string error = 1; + + // connectID indicates the identifier of the connection + int64 connectID = 2; +} + +message Data { + // connectID to connect to + int64 connectID = 1; + + // error message if error happens + string error = 2; + + // stream data + bytes data = 3; +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go new file mode 100644 index 0000000000..f4fbbff262 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fieldpath defines a way for referencing path elements (e.g., an +// index in an array, or a key in a map). It provides types for arranging these +// into paths for referencing nested fields, and for grouping those into sets, +// for referencing multiple nested fields. +package fieldpath diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go new file mode 100644 index 0000000000..1578f64c04 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go @@ -0,0 +1,317 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// PathElement describes how to select a child field given a containing object. +type PathElement struct { + // Exactly one of the following fields should be non-nil. + + // FieldName selects a single field from a map (reminder: this is also + // how structs are represented). The containing object must be a map. + FieldName *string + + // Key selects the list element which has fields matching those given. + // The containing object must be an associative list with map typed + // elements. They are sorted alphabetically. + Key *value.FieldList + + // Value selects the list element with the given value. The containing + // object must be an associative list with a primitive typed element + // (i.e., a set). + Value *value.Value + + // Index selects a list element by its index number. 
The containing + // object must be an atomic list. + Index *int +} + +// Less provides an order for path elements. +func (e PathElement) Less(rhs PathElement) bool { + return e.Compare(rhs) < 0 +} + +// Compare provides an order for path elements. +func (e PathElement) Compare(rhs PathElement) int { + if e.FieldName != nil { + if rhs.FieldName == nil { + return -1 + } + return strings.Compare(*e.FieldName, *rhs.FieldName) + } else if rhs.FieldName != nil { + return 1 + } + + if e.Key != nil { + if rhs.Key == nil { + return -1 + } + return e.Key.Compare(*rhs.Key) + } else if rhs.Key != nil { + return 1 + } + + if e.Value != nil { + if rhs.Value == nil { + return -1 + } + return value.Compare(*e.Value, *rhs.Value) + } else if rhs.Value != nil { + return 1 + } + + if e.Index != nil { + if rhs.Index == nil { + return -1 + } + if *e.Index < *rhs.Index { + return -1 + } else if *e.Index == *rhs.Index { + return 0 + } + return 1 + } else if rhs.Index != nil { + return 1 + } + + return 0 +} + +// Equals returns true if both path elements are equal. +func (e PathElement) Equals(rhs PathElement) bool { + if e.FieldName != nil { + if rhs.FieldName == nil { + return false + } + return *e.FieldName == *rhs.FieldName + } else if rhs.FieldName != nil { + return false + } + if e.Key != nil { + if rhs.Key == nil { + return false + } + return e.Key.Equals(*rhs.Key) + } else if rhs.Key != nil { + return false + } + if e.Value != nil { + if rhs.Value == nil { + return false + } + return value.Equals(*e.Value, *rhs.Value) + } else if rhs.Value != nil { + return false + } + if e.Index != nil { + if rhs.Index == nil { + return false + } + return *e.Index == *rhs.Index + } else if rhs.Index != nil { + return false + } + return true +} + +// String presents the path element as a human-readable string. +func (e PathElement) String() string { + switch { + case e.FieldName != nil: + return "." + *e.FieldName + case e.Key != nil: + strs := make([]string, len(*e.Key)) + for i, k := range *e.Key { + strs[i] = fmt.Sprintf("%v=%v", k.Name, value.ToString(k.Value)) + } + // Keys are supposed to be sorted. + return "[" + strings.Join(strs, ",") + "]" + case e.Value != nil: + return fmt.Sprintf("[=%v]", value.ToString(*e.Value)) + case e.Index != nil: + return fmt.Sprintf("[%v]", *e.Index) + default: + return "{{invalid path element}}" + } +} + +// KeyByFields is a helper function which constructs a key for an associative +// list type. `nameValues` must have an even number of entries, alternating +// names (type must be string) with values (type must be value.Value). If these +// conditions are not met, KeyByFields will panic--it's intended for static +// construction and shouldn't have user-produced values passed to it. +func KeyByFields(nameValues ...interface{}) *value.FieldList { + if len(nameValues)%2 != 0 { + panic("must have a value for every name") + } + out := value.FieldList{} + for i := 0; i < len(nameValues)-1; i += 2 { + out = append(out, value.Field{Name: nameValues[i].(string), Value: value.NewValueInterface(nameValues[i+1])}) + } + out.Sort() + return &out +} + +// PathElementSet is a set of path elements. +// TODO: serialize as a list. +type PathElementSet struct { + members sortedPathElements +} + +func MakePathElementSet(size int) PathElementSet { + return PathElementSet{ + members: make(sortedPathElements, 0, size), + } +} + +type sortedPathElements []PathElement + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. 
+func (spe sortedPathElements) Len() int { return len(spe) } +func (spe sortedPathElements) Less(i, j int) bool { return spe[i].Less(spe[j]) } +func (spe sortedPathElements) Swap(i, j int) { spe[i], spe[j] = spe[j], spe[i] } + +// Insert adds pe to the set. +func (s *PathElementSet) Insert(pe PathElement) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, pe) + return + } + if s.members[loc].Equals(pe) { + return + } + s.members = append(s.members, PathElement{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = pe +} + +// Union returns a set containing elements that appear in either s or s2. +func (s *PathElementSet) Union(s2 *PathElementSet) *PathElementSet { + out := &PathElementSet{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].Less(s2.members[j]) { + out.members = append(out.members, s.members[i]) + i++ + } else { + out.members = append(out.members, s2.members[j]) + if !s2.members[j].Less(s.members[i]) { + i++ + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + if j < len(s2.members) { + out.members = append(out.members, s2.members[j:]...) + } + return out +} + +// Intersection returns a set containing elements which appear in both s and s2. +func (s *PathElementSet) Intersection(s2 *PathElementSet) *PathElementSet { + out := &PathElementSet{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].Less(s2.members[j]) { + i++ + } else { + if !s2.members[j].Less(s.members[i]) { + out.members = append(out.members, s.members[i]) + i++ + } + j++ + } + } + + return out +} + +// Difference returns a set containing elements which appear in s but not in s2. +func (s *PathElementSet) Difference(s2 *PathElementSet) *PathElementSet { + out := &PathElementSet{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].Less(s2.members[j]) { + out.members = append(out.members, s.members[i]) + i++ + } else { + if !s2.members[j].Less(s.members[i]) { + i++ + } + j++ + } + } + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + return out +} + +// Size retuns the number of elements in the set. +func (s *PathElementSet) Size() int { return len(s.members) } + +// Has returns true if pe is a member of the set. +func (s *PathElementSet) Has(pe PathElement) bool { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].Less(pe) + }) + if loc == len(s.members) { + return false + } + if s.members[loc].Equals(pe) { + return true + } + return false +} + +// Equals returns true if s and s2 have exactly the same members. +func (s *PathElementSet) Equals(s2 *PathElementSet) bool { + if len(s.members) != len(s2.members) { + return false + } + for k := range s.members { + if !s.members[k].Equals(s2.members[k]) { + return false + } + } + return true +} + +// Iterate calls f for each PathElement in the set. The order is deterministic. +func (s *PathElementSet) Iterate(f func(PathElement)) { + for _, pe := range s.members { + f(pe) + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go new file mode 100644 index 0000000000..20775ee022 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go @@ -0,0 +1,134 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// SetFromValue creates a set containing every leaf field mentioned in v. +func SetFromValue(v value.Value) *Set { + s := NewSet() + + w := objectWalker{ + path: Path{}, + value: v, + allocator: value.NewFreelistAllocator(), + do: func(p Path) { s.Insert(p) }, + } + + w.walk() + return s +} + +type objectWalker struct { + path Path + value value.Value + allocator value.Allocator + + do func(Path) +} + +func (w *objectWalker) walk() { + switch { + case w.value.IsNull(): + case w.value.IsFloat(): + case w.value.IsInt(): + case w.value.IsString(): + case w.value.IsBool(): + // All leaf fields handled the same way (after the switch + // statement). + + // Descend + case w.value.IsList(): + // If the list were atomic, we'd break here, but we don't have + // a schema, so we can't tell. + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + i, value := iter.Item() + w2 := *w + w2.path = append(w.path, w.GuessBestListPathElement(i, value)) + w2.value = value + w2.walk() + } + return + case w.value.IsMap(): + // If the map/struct were atomic, we'd break here, but we don't + // have a schema, so we can't tell. + + m := w.value.AsMapUsing(w.allocator) + defer w.allocator.Free(m) + m.IterateUsing(w.allocator, func(k string, val value.Value) bool { + w2 := *w + w2.path = append(w.path, PathElement{FieldName: &k}) + w2.value = val + w2.walk() + return true + }) + return + } + + // Leaf fields get added to the set. + if len(w.path) > 0 { + w.do(w.path) + } +} + +// AssociativeListCandidateFieldNames lists the field names which are +// considered keys if found in a list element. +var AssociativeListCandidateFieldNames = []string{ + "key", + "id", + "name", +} + +// GuessBestListPathElement guesses whether item is an associative list +// element, which should be referenced by key(s), or if it is not and therefore +// referencing by index is acceptable. Currently this is done by checking +// whether item has any of the fields listed in +// AssociativeListCandidateFieldNames which have scalar values. +func (w *objectWalker) GuessBestListPathElement(index int, item value.Value) PathElement { + if !item.IsMap() { + // Non map items could be parts of sets or regular "atomic" + // lists. We won't try to guess whether something should be a + // set or not. + return PathElement{Index: &index} + } + + m := item.AsMapUsing(w.allocator) + defer w.allocator.Free(m) + var keys value.FieldList + for _, name := range AssociativeListCandidateFieldNames { + f, ok := m.Get(name) + if !ok { + continue + } + // only accept primitive/scalar types as keys. 
+ if f.IsNull() || f.IsMap() || f.IsList() { + continue + } + keys = append(keys, value.Field{Name: name, Value: f}) + } + if len(keys) > 0 { + keys.Sort() + return PathElement{Key: &keys} + } + return PathElement{Index: &index} +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go new file mode 100644 index 0000000000..20499dc03d --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go @@ -0,0 +1,144 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "strings" +) + +// APIVersion describes the version of an object or of a fieldset. +type APIVersion string + +type VersionedSet interface { + Set() *Set + APIVersion() APIVersion + Applied() bool +} + +// VersionedSet associates a version to a set. +type versionedSet struct { + set *Set + apiVersion APIVersion + applied bool +} + +func NewVersionedSet(set *Set, apiVersion APIVersion, applied bool) VersionedSet { + return versionedSet{ + set: set, + apiVersion: apiVersion, + applied: applied, + } +} + +func (v versionedSet) Set() *Set { + return v.set +} + +func (v versionedSet) APIVersion() APIVersion { + return v.apiVersion +} + +func (v versionedSet) Applied() bool { + return v.applied +} + +// ManagedFields is a map from manager to VersionedSet (what they own in +// what version). +type ManagedFields map[string]VersionedSet + +// Equals returns true if the two managedfields are the same, false +// otherwise. +func (lhs ManagedFields) Equals(rhs ManagedFields) bool { + if len(lhs) != len(rhs) { + return false + } + + for manager, left := range lhs { + right, ok := rhs[manager] + if !ok { + return false + } + if left.APIVersion() != right.APIVersion() || left.Applied() != right.Applied() { + return false + } + if !left.Set().Equals(right.Set()) { + return false + } + } + return true +} + +// Copy the list, this is mostly a shallow copy. +func (lhs ManagedFields) Copy() ManagedFields { + copy := ManagedFields{} + for manager, set := range lhs { + copy[manager] = set + } + return copy +} + +// Difference returns a symmetric difference between two Managers. If a +// given user's entry has version X in lhs and version Y in rhs, then +// the return value for that user will be from rhs. If the difference for +// a user is an empty set, that user will not be inserted in the map. +func (lhs ManagedFields) Difference(rhs ManagedFields) ManagedFields { + diff := ManagedFields{} + + for manager, left := range lhs { + right, ok := rhs[manager] + if !ok { + if !left.Set().Empty() { + diff[manager] = left + } + continue + } + + // If we have sets in both but their version + // differs, we don't even diff and keep the + // entire thing. 
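// Illustrative sketch, not part of the vendored sources: what SetFromValue
// (defined above in fromvalue.go) produces for a small untyped object. With no
// schema available, list elements carrying a scalar "name"/"key"/"id" field
// are guessed to be associative and addressed by key; other elements fall back
// to their index. The object below is made up.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/value"
)

func main() {
	obj := value.NewValueInterface(map[string]interface{}{
		"replicas": 3,
		"containers": []interface{}{
			map[string]interface{}{"name": "nginx", "image": "nginx:1.19"},
		},
		"ports": []interface{}{80, 443}, // plain scalars: addressed by index
	})

	set := fieldpath.SetFromValue(obj)

	// Each leaf field becomes one path, printed roughly as:
	//   .replicas
	//   .containers[name="nginx"].name
	//   .containers[name="nginx"].image
	//   .ports[0]
	//   .ports[1]
	set.Iterate(func(p fieldpath.Path) {
		fmt.Println(p.String())
	})
}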
+ if left.APIVersion() != right.APIVersion() { + diff[manager] = right + continue + } + + newSet := left.Set().Difference(right.Set()).Union(right.Set().Difference(left.Set())) + if !newSet.Empty() { + diff[manager] = NewVersionedSet(newSet, right.APIVersion(), false) + } + } + + for manager, set := range rhs { + if _, ok := lhs[manager]; ok { + // Already done + continue + } + if !set.Set().Empty() { + diff[manager] = set + } + } + + return diff +} + +func (lhs ManagedFields) String() string { + s := strings.Builder{} + for k, v := range lhs { + fmt.Fprintf(&s, "%s:\n", k) + fmt.Fprintf(&s, "- Applied: %v\n", v.Applied()) + fmt.Fprintf(&s, "- APIVersion: %v\n", v.APIVersion()) + fmt.Fprintf(&s, "- Set: %v\n", v.Set()) + } + return s.String() +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go new file mode 100644 index 0000000000..0413130bd1 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go @@ -0,0 +1,118 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Path describes how to select a potentially deeply-nested child field given a +// containing object. +type Path []PathElement + +func (fp Path) String() string { + strs := make([]string, len(fp)) + for i := range fp { + strs[i] = fp[i].String() + } + return strings.Join(strs, "") +} + +// Equals returns true if the two paths are equivalent. +func (fp Path) Equals(fp2 Path) bool { + if len(fp) != len(fp2) { + return false + } + for i := range fp { + if !fp[i].Equals(fp2[i]) { + return false + } + } + return true +} + +// Less provides a lexical order for Paths. +func (fp Path) Compare(rhs Path) int { + i := 0 + for { + if i >= len(fp) && i >= len(rhs) { + // Paths are the same length and all items are equal. + return 0 + } + if i >= len(fp) { + // LHS is shorter. + return -1 + } + if i >= len(rhs) { + // RHS is shorter. + return 1 + } + if c := fp[i].Compare(rhs[i]); c != 0 { + return c + } + // The items are equal; continue. + i++ + } +} + +func (fp Path) Copy() Path { + new := make(Path, len(fp)) + copy(new, fp) + return new +} + +// MakePath constructs a Path. The parts may be PathElements, ints, strings. +func MakePath(parts ...interface{}) (Path, error) { + var fp Path + for _, p := range parts { + switch t := p.(type) { + case PathElement: + fp = append(fp, t) + case int: + // TODO: Understand schema and object and convert this to the + // FieldSpecifier below if appropriate. 
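// Illustrative sketch, not part of the vendored sources: ManagedFields (above)
// records, per manager, which fields it owns, at which version, and whether
// they were applied. The manager name, version, and paths are made up.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	before := fieldpath.ManagedFields{
		"kubectl": fieldpath.NewVersionedSet(
			fieldpath.NewSet(fieldpath.MakePathOrDie("spec", "replicas")),
			fieldpath.APIVersion("v1"),
			true, // written via apply
		),
	}
	after := fieldpath.ManagedFields{
		"kubectl": fieldpath.NewVersionedSet(
			fieldpath.NewSet(
				fieldpath.MakePathOrDie("spec", "replicas"),
				fieldpath.MakePathOrDie("spec", "paused"),
			),
			fieldpath.APIVersion("v1"),
			true,
		),
	}

	fmt.Println(before.Equals(after)) // false

	// Difference is symmetric per manager; here only .spec.paused differs.
	diff := before.Difference(after)
	fmt.Println(diff["kubectl"].Set().String()) // .spec.paused
}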
+ fp = append(fp, PathElement{Index: &t}) + case string: + fp = append(fp, PathElement{FieldName: &t}) + case *value.FieldList: + if len(*t) == 0 { + return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)") + } + fp = append(fp, PathElement{Key: t}) + case value.Value: + // TODO: understand schema and verify that this is a set type + // TODO: make a copy of t + fp = append(fp, PathElement{Value: &t}) + default: + return nil, fmt.Errorf("unable to make %#v into a path element", p) + } + } + return fp, nil +} + +// MakePathOrDie panics if parts can't be turned into a path. Good for things +// that are known at complie time. +func MakePathOrDie(parts ...interface{}) Path { + fp, err := MakePath(parts...) + if err != nil { + panic(err) + } + return fp +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go new file mode 100644 index 0000000000..9b14ca581b --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "sort" + + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// PathElementValueMap is a map from PathElement to value.Value. +// +// TODO(apelisse): We have multiple very similar implementation of this +// for PathElementSet and SetNodeMap, so we could probably share the +// code. +type PathElementValueMap struct { + members sortedPathElementValues +} + +func MakePathElementValueMap(size int) PathElementValueMap { + return PathElementValueMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +type pathElementValue struct { + PathElement PathElement + Value value.Value +} + +type sortedPathElementValues []pathElementValue + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. +func (spev sortedPathElementValues) Len() int { return len(spev) } +func (spev sortedPathElementValues) Less(i, j int) bool { + return spev[i].PathElement.Less(spev[j].PathElement) +} +func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } + +// Insert adds the pathelement and associated value in the map. +func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, pathElementValue{pe, v}) + return + } + if s.members[loc].PathElement.Equals(pe) { + return + } + s.members = append(s.members, pathElementValue{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = pathElementValue{pe, v} +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. 
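// Illustrative sketch, not part of the vendored sources: MakePathOrDie (above)
// accepts a mix of part types: strings become field names, ints become list
// indexes, and *value.FieldList keys (e.g. from KeyByFields) become
// associative-list selectors. The paths below are made up.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	p := fieldpath.MakePathOrDie(
		"spec",
		"containers",
		fieldpath.KeyByFields("name", "nginx"),
		"image",
	)
	// Prints roughly `.spec.containers[name="nginx"].image`.
	fmt.Println(p.String())

	q := fieldpath.MakePathOrDie("status", "conditions", 0, "type")
	fmt.Println(q.String()) // .status.conditions[0].type

	// Paths are comparable and ordered, which the set types in this package
	// rely on.
	fmt.Println(p.Equals(q), p.Compare(q) < 0) // false true
}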
+func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + return nil, false + } + if s.members[loc].PathElement.Equals(pe) { + return s.members[loc].Value, true + } + return nil, false +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go new file mode 100644 index 0000000000..cb18e7b1ca --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go @@ -0,0 +1,168 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" + + jsoniter "github.com/json-iterator/go" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +var ErrUnknownPathElementType = errors.New("unknown path element type") + +const ( + // Field indicates that the content of this path element is a field's name + peField = "f" + + // Value indicates that the content of this path element is a field's value + peValue = "v" + + // Index indicates that the content of this path element is an index in an array + peIndex = "i" + + // Key indicates that the content of this path element is a key value map + peKey = "k" + + // Separator separates the type of a path element from the contents + peSeparator = ":" +) + +var ( + peFieldSepBytes = []byte(peField + peSeparator) + peValueSepBytes = []byte(peValue + peSeparator) + peIndexSepBytes = []byte(peIndex + peSeparator) + peKeySepBytes = []byte(peKey + peSeparator) + peSepBytes = []byte(peSeparator) +) + +// DeserializePathElement parses a serialized path element +func DeserializePathElement(s string) (PathElement, error) { + b := []byte(s) + if len(b) < 2 { + return PathElement{}, errors.New("key must be 2 characters long:") + } + typeSep, b := b[:2], b[2:] + if typeSep[1] != peSepBytes[0] { + return PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch typeSep[0] { + case peFieldSepBytes[0]: + // Slice s rather than convert b, to save on + // allocations. 
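// Illustrative sketch, not part of the vendored sources: the compact
// "type:content" encoding handled by DeserializePathElement above and
// SerializePathElement below, shown as a round trip. The element contents are
// made up.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	name := "replicas"
	idx := 2

	for _, pe := range []fieldpath.PathElement{
		{FieldName: &name}, // serializes as "f:replicas"
		{Index: &idx},      // serializes as "i:2"
		{Key: fieldpath.KeyByFields("name", "nginx")}, // "k:" plus a JSON object
	} {
		s, err := fieldpath.SerializePathElement(pe)
		if err != nil {
			panic(err)
		}
		back, err := fieldpath.DeserializePathElement(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q round-trips: %v\n", s, back.Equals(pe))
	}
}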
+ str := s[2:] + return PathElement{ + FieldName: &str, + }, nil + case peValueSepBytes[0]: + iter := readPool.BorrowIterator(b) + defer readPool.ReturnIterator(iter) + v, err := value.ReadJSONIter(iter) + if err != nil { + return PathElement{}, err + } + return PathElement{Value: &v}, nil + case peKeySepBytes[0]: + iter := readPool.BorrowIterator(b) + defer readPool.ReturnIterator(iter) + fields := value.FieldList{} + + iter.ReadObjectCB(func(iter *jsoniter.Iterator, key string) bool { + v, err := value.ReadJSONIter(iter) + if err != nil { + iter.Error = err + return false + } + fields = append(fields, value.Field{Name: key, Value: v}) + return true + }) + fields.Sort() + return PathElement{Key: &fields}, iter.Error + case peIndexSepBytes[0]: + i, err := strconv.Atoi(s[2:]) + if err != nil { + return PathElement{}, err + } + return PathElement{ + Index: &i, + }, nil + default: + return PathElement{}, ErrUnknownPathElementType + } +} + +var ( + readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool() + writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool() +) + +// SerializePathElement serializes a path element +func SerializePathElement(pe PathElement) (string, error) { + buf := strings.Builder{} + err := serializePathElementToWriter(&buf, pe) + return buf.String(), err +} + +func serializePathElementToWriter(w io.Writer, pe PathElement) error { + stream := writePool.BorrowStream(w) + defer writePool.ReturnStream(stream) + switch { + case pe.FieldName != nil: + if _, err := stream.Write(peFieldSepBytes); err != nil { + return err + } + stream.WriteRaw(*pe.FieldName) + case pe.Key != nil: + if _, err := stream.Write(peKeySepBytes); err != nil { + return err + } + stream.WriteObjectStart() + + for i, field := range *pe.Key { + if i > 0 { + stream.WriteMore() + } + stream.WriteObjectField(field.Name) + value.WriteJSONStream(field.Value, stream) + } + stream.WriteObjectEnd() + case pe.Value != nil: + if _, err := stream.Write(peValueSepBytes); err != nil { + return err + } + value.WriteJSONStream(*pe.Value, stream) + case pe.Index != nil: + if _, err := stream.Write(peIndexSepBytes); err != nil { + return err + } + stream.WriteInt(*pe.Index) + default: + return errors.New("invalid PathElement") + } + b := stream.Buffer() + err := stream.Flush() + // Help jsoniter manage its buffers--without this, the next + // use of the stream is likely to require an allocation. Look + // at the jsoniter stream code to understand why. They were probably + // optimizing for folks using the buffer directly. + stream.SetBuffer(b[:0]) + return err +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go new file mode 100644 index 0000000000..b992b93c5f --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go @@ -0,0 +1,238 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldpath + +import ( + "bytes" + "io" + "unsafe" + + jsoniter "github.com/json-iterator/go" +) + +func (s *Set) ToJSON() ([]byte, error) { + buf := bytes.Buffer{} + err := s.ToJSONStream(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (s *Set) ToJSONStream(w io.Writer) error { + stream := writePool.BorrowStream(w) + defer writePool.ReturnStream(stream) + + var r reusableBuilder + + stream.WriteObjectStart() + err := s.emitContentsV1(false, stream, &r) + if err != nil { + return err + } + stream.WriteObjectEnd() + return stream.Flush() +} + +func manageMemory(stream *jsoniter.Stream) error { + // Help jsoniter manage its buffers--without this, it does a bunch of + // alloctaions that are not necessary. They were probably optimizing + // for folks using the buffer directly. + b := stream.Buffer() + if len(b) > 4096 || cap(b)-len(b) < 2048 { + if err := stream.Flush(); err != nil { + return err + } + stream.SetBuffer(b[:0]) + } + return nil +} + +type reusableBuilder struct { + bytes.Buffer +} + +func (r *reusableBuilder) unsafeString() string { + b := r.Bytes() + return *(*string)(unsafe.Pointer(&b)) +} + +func (r *reusableBuilder) reset() *bytes.Buffer { + r.Reset() + return &r.Buffer +} + +func (s *Set) emitContentsV1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error { + mi, ci := 0, 0 + first := true + preWrite := func() { + if first { + first = false + return + } + stream.WriteMore() + } + + if includeSelf && !(len(s.Members.members) == 0 && len(s.Children.members) == 0) { + preWrite() + stream.WriteObjectField(".") + stream.WriteEmptyObject() + } + + for mi < len(s.Members.members) && ci < len(s.Children.members) { + mpe := s.Members.members[mi] + cpe := s.Children.members[ci].pathElement + + if c := mpe.Compare(cpe); c < 0 { + preWrite() + if err := serializePathElementToWriter(r.reset(), mpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteEmptyObject() + mi++ + } else if c > 0 { + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + ci++ + } else { + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(true, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + mi++ + ci++ + } + } + + for mi < len(s.Members.members) { + mpe := s.Members.members[mi] + + preWrite() + if err := serializePathElementToWriter(r.reset(), mpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteEmptyObject() + mi++ + } + + for ci < len(s.Children.members) { + cpe := s.Children.members[ci].pathElement + + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + ci++ + } + + return manageMemory(stream) +} + +// FromJSON clears s and reads a JSON formatted set structure. +func (s *Set) FromJSON(r io.Reader) error { + // The iterator pool is completely useless for memory management, grrr. 
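// Illustrative sketch, not part of the vendored sources: the wire format
// written by ToJSON above is a nested JSON object keyed by serialized path
// elements, with "." marking nodes that are themselves members; FromJSON reads
// it back. The paths are made up and the exact output ordering is whatever the
// sorted member order produces.
package main

import (
	"bytes"
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	s := fieldpath.NewSet(
		fieldpath.MakePathOrDie("metadata", "name"),
		fieldpath.MakePathOrDie("spec", "replicas"),
	)

	b, err := s.ToJSON()
	if err != nil {
		panic(err)
	}
	// Roughly: {"f:metadata":{"f:name":{}},"f:spec":{"f:replicas":{}}}
	fmt.Println(string(b))

	parsed := fieldpath.NewSet()
	if err := parsed.FromJSON(bytes.NewReader(b)); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Equals(s)) // true
}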
+ iter := jsoniter.Parse(jsoniter.ConfigCompatibleWithStandardLibrary, r, 4096) + + found, _ := readIterV1(iter) + if found == nil { + *s = Set{} + } else { + *s = *found + } + return iter.Error +} + +// returns true if this subtree is also (or only) a member of parent; s is nil +// if there are no further children. +func readIterV1(iter *jsoniter.Iterator) (children *Set, isMember bool) { + iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool { + if key == "." { + isMember = true + iter.Skip() + return true + } + pe, err := DeserializePathElement(key) + if err == ErrUnknownPathElementType { + // Ignore these-- a future version maybe knows what + // they are. We drop these completely rather than try + // to preserve things we don't understand. + iter.Skip() + return true + } else if err != nil { + iter.ReportError("parsing key as path element", err.Error()) + iter.Skip() + return true + } + grandchildren, childIsMember := readIterV1(iter) + if childIsMember { + if children == nil { + children = &Set{} + } + m := &children.Members.members + // Since we expect that most of the time these will have been + // serialized in the right order, we just verify that and append. + appendOK := len(*m) == 0 || (*m)[len(*m)-1].Less(pe) + if appendOK { + *m = append(*m, pe) + } else { + children.Members.Insert(pe) + } + } + if grandchildren != nil { + if children == nil { + children = &Set{} + } + // Since we expect that most of the time these will have been + // serialized in the right order, we just verify that and append. + m := &children.Children.members + appendOK := len(*m) == 0 || (*m)[len(*m)-1].pathElement.Less(pe) + if appendOK { + *m = append(*m, setNode{pe, grandchildren}) + } else { + *children.Children.Descend(pe) = *grandchildren + } + } + return true + }) + if children == nil { + isMember = true + } + + return children, isMember +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go new file mode 100644 index 0000000000..029c2b6003 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go @@ -0,0 +1,406 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "sort" + "strings" +) + +// Set identifies a set of fields. +type Set struct { + // Members lists fields that are part of the set. + // TODO: will be serialized as a list of path elements. + Members PathElementSet + + // Children lists child fields which themselves have children that are + // members of the set. Appearance in this list does not imply membership. + // Note: this is a tree, not an arbitrary graph. + Children SetNodeMap +} + +// NewSet makes a set from a list of paths. +func NewSet(paths ...Path) *Set { + s := &Set{} + for _, p := range paths { + s.Insert(p) + } + return s +} + +// Insert adds the field identified by `p` to the set. Important: parent fields +// are NOT added to the set; if that is desired, they must be added separately. 
+func (s *Set) Insert(p Path) { + if len(p) == 0 { + // Zero-length path identifies the entire object; we don't + // track top-level ownership. + return + } + for { + if len(p) == 1 { + s.Members.Insert(p[0]) + return + } + s = s.Children.Descend(p[0]) + p = p[1:] + } +} + +// Union returns a Set containing elements which appear in either s or s2. +func (s *Set) Union(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Union(&s2.Members), + Children: *s.Children.Union(&s2.Children), + } +} + +// Intersection returns a Set containing leaf elements which appear in both s +// and s2. Intersection can be constructed from Union and Difference operations +// (example in the tests) but it's much faster to do it in one pass. +func (s *Set) Intersection(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Intersection(&s2.Members), + Children: *s.Children.Intersection(&s2.Children), + } +} + +// Difference returns a Set containing elements which: +// * appear in s +// * do not appear in s2 +// +// In other words, for leaf fields, this acts like a regular set difference +// operation. When non leaf fields are compared with leaf fields ("parents" +// which contain "children"), the effect is: +// * parent - child = parent +// * child - parent = {empty set} +func (s *Set) Difference(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Difference(&s2.Members), + Children: *s.Children.Difference(s2), + } +} + +// RecursiveDifference returns a Set containing elements which: +// * appear in s +// * do not appear in s2 +// +// Compared to a regular difference, +// this removes every field **and its children** from s that is contained in s2. +// +// For example, with s containing `a.b.c` and s2 containing `a.b`, +// a RecursiveDifference will result in `a`, as the entire node `a.b` gets removed. +func (s *Set) RecursiveDifference(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Difference(&s2.Members), + Children: *s.Children.RecursiveDifference(s2), + } +} + +// Size returns the number of members of the set. +func (s *Set) Size() int { + return s.Members.Size() + s.Children.Size() +} + +// Empty returns true if there are no members of the set. It is a separate +// function from Size since it's common to check whether size > 0, and +// potentially much faster to return as soon as a single element is found. +func (s *Set) Empty() bool { + if s.Members.Size() > 0 { + return false + } + return s.Children.Empty() +} + +// Has returns true if the field referenced by `p` is a member of the set. +func (s *Set) Has(p Path) bool { + if len(p) == 0 { + // No one owns "the entire object" + return false + } + for { + if len(p) == 1 { + return s.Members.Has(p[0]) + } + var ok bool + s, ok = s.Children.Get(p[0]) + if !ok { + return false + } + p = p[1:] + } +} + +// Equals returns true if s and s2 have exactly the same members. +func (s *Set) Equals(s2 *Set) bool { + return s.Members.Equals(&s2.Members) && s.Children.Equals(&s2.Children) +} + +// String returns the set one element per line. +func (s *Set) String() string { + elements := []string{} + s.Iterate(func(p Path) { + elements = append(elements, p.String()) + }) + return strings.Join(elements, "\n") +} + +// Iterate calls f once for each field that is a member of the set (preorder +// DFS). The path passed to f will be reused so make a copy if you wish to keep +// it. 
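// Illustrative sketch, not part of the vendored sources: exercising the Set
// operations defined above on two overlapping field paths. The paths are made
// up.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	abc := fieldpath.NewSet(fieldpath.MakePathOrDie("a", "b", "c"))
	ab := fieldpath.NewSet(fieldpath.MakePathOrDie("a", "b"))

	// Plain difference: the parent leaf .a.b survives when the other set only
	// contains the deeper leaf .a.b.c.
	fmt.Println(ab.Difference(abc).String()) // .a.b

	// Recursive difference prunes .a.b together with everything below it.
	fmt.Println(abc.RecursiveDifference(ab).Has(fieldpath.MakePathOrDie("a", "b", "c"))) // false

	// Union and Has behave like ordinary set operations on leaf paths.
	u := abc.Union(ab)
	fmt.Println(u.Has(fieldpath.MakePathOrDie("a", "b")), u.Size()) // true 2
}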
+func (s *Set) Iterate(f func(Path)) { + s.iteratePrefix(Path{}, f) +} + +func (s *Set) iteratePrefix(prefix Path, f func(Path)) { + s.Members.Iterate(func(pe PathElement) { f(append(prefix, pe)) }) + s.Children.iteratePrefix(prefix, f) +} + +// WithPrefix returns the subset of paths which begin with the given prefix, +// with the prefix not included. +func (s *Set) WithPrefix(pe PathElement) *Set { + subset, ok := s.Children.Get(pe) + if !ok { + return NewSet() + } + return subset +} + +// setNode is a pair of PathElement / Set, for the purpose of expressing +// nested set membership. +type setNode struct { + pathElement PathElement + set *Set +} + +// SetNodeMap is a map of PathElement to subset. +type SetNodeMap struct { + members sortedSetNode +} + +type sortedSetNode []setNode + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. +func (s sortedSetNode) Len() int { return len(s) } +func (s sortedSetNode) Less(i, j int) bool { return s[i].pathElement.Less(s[j].pathElement) } +func (s sortedSetNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Descend adds pe to the set if necessary, returning the associated subset. +func (s *SetNodeMap) Descend(pe PathElement) *Set { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].pathElement.Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, setNode{pathElement: pe, set: &Set{}}) + return s.members[loc].set + } + if s.members[loc].pathElement.Equals(pe) { + return s.members[loc].set + } + s.members = append(s.members, setNode{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = setNode{pathElement: pe, set: &Set{}} + return s.members[loc].set +} + +// Size returns the sum of the number of members of all subsets. +func (s *SetNodeMap) Size() int { + count := 0 + for _, v := range s.members { + count += v.set.Size() + } + return count +} + +// Empty returns false if there's at least one member in some child set. +func (s *SetNodeMap) Empty() bool { + for _, n := range s.members { + if !n.set.Empty() { + return false + } + } + return true +} + +// Get returns (the associated set, true) or (nil, false) if there is none. +func (s *SetNodeMap) Get(pe PathElement) (*Set, bool) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].pathElement.Less(pe) + }) + if loc == len(s.members) { + return nil, false + } + if s.members[loc].pathElement.Equals(pe) { + return s.members[loc].set, true + } + return nil, false +} + +// Equals returns true if s and s2 have the same structure (same nested +// child sets). +func (s *SetNodeMap) Equals(s2 *SetNodeMap) bool { + if len(s.members) != len(s2.members) { + return false + } + for i := range s.members { + if !s.members[i].pathElement.Equals(s2.members[i].pathElement) { + return false + } + if !s.members[i].set.Equals(s2.members[i].set) { + return false + } + } + return true +} + +// Union returns a SetNodeMap with members that appear in either s or s2. 
+func (s *SetNodeMap) Union(s2 *SetNodeMap) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].pathElement.Less(s2.members[j].pathElement) { + out.members = append(out.members, s.members[i]) + i++ + } else { + if !s2.members[j].pathElement.Less(s.members[i].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set.Union(s2.members[j].set)}) + i++ + } else { + out.members = append(out.members, s2.members[j]) + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + if j < len(s2.members) { + out.members = append(out.members, s2.members[j:]...) + } + return out +} + +// Intersection returns a SetNodeMap with members that appear in both s and s2. +func (s *SetNodeMap) Intersection(s2 *SetNodeMap) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].pathElement.Less(s2.members[j].pathElement) { + i++ + } else { + if !s2.members[j].pathElement.Less(s.members[i].pathElement) { + res := s.members[i].set.Intersection(s2.members[j].set) + if !res.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: res}) + } + i++ + } + j++ + } + } + return out +} + +// Difference returns a SetNodeMap with members that appear in s but not in s2. +func (s *SetNodeMap) Difference(s2 *Set) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.Children.members) { + if s.members[i].pathElement.Less(s2.Children.members[j].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set}) + i++ + } else { + if !s2.Children.members[j].pathElement.Less(s.members[i].pathElement) { + + diff := s.members[i].set.Difference(s2.Children.members[j].set) + // We aren't permitted to add nodes with no elements. + if !diff.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: diff}) + } + + i++ + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + return out +} + +// RecursiveDifference returns a SetNodeMap with members that appear in s but not in s2. +// +// Compared to a regular difference, +// this removes every field **and its children** from s that is contained in s2. +// +// For example, with s containing `a.b.c` and s2 containing `a.b`, +// a RecursiveDifference will result in `a`, as the entire node `a.b` gets removed. 
+func (s *SetNodeMap) RecursiveDifference(s2 *Set) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.Children.members) { + if s.members[i].pathElement.Less(s2.Children.members[j].pathElement) { + if !s2.Members.Has(s.members[i].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set}) + } + i++ + } else { + if !s2.Children.members[j].pathElement.Less(s.members[i].pathElement) { + if !s2.Members.Has(s.members[i].pathElement) { + diff := s.members[i].set.RecursiveDifference(s2.Children.members[j].set) + if !diff.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: diff}) + } + } + i++ + } + j++ + } + } + + if i < len(s.members) { + for _, c := range s.members[i:] { + if !s2.Members.Has(c.pathElement) { + out.members = append(out.members, c) + } + } + } + + return out +} + +// Iterate calls f for each PathElement in the set. +func (s *SetNodeMap) Iterate(f func(PathElement)) { + for _, n := range s.members { + f(n.pathElement) + } +} + +func (s *SetNodeMap) iteratePrefix(prefix Path, f func(Path)) { + for _, n := range s.members { + pe := n.pathElement + n.set.iteratePrefix(append(prefix, pe), f) + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go new file mode 100644 index 0000000000..75a492d8ea --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Conflict is a conflict on a specific field with the current manager of +// that field. It does implement the error interface so that it can be +// used as an error. +type Conflict struct { + Manager string + Path fieldpath.Path +} + +// Conflict is an error. +var _ error = Conflict{} + +// Error formats the conflict as an error. +func (c Conflict) Error() string { + return fmt.Sprintf("conflict with %q: %v", c.Manager, c.Path) +} + +// Equals returns true if c == c2 +func (c Conflict) Equals(c2 Conflict) bool { + if c.Manager != c2.Manager { + return false + } + return c.Path.Equals(c2.Path) +} + +// Conflicts accumulates multiple conflicts and aggregates them by managers. +type Conflicts []Conflict + +var _ error = Conflicts{} + +// Error prints the list of conflicts, grouped by sorted managers. +func (conflicts Conflicts) Error() string { + if len(conflicts) == 1 { + return conflicts[0].Error() + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + managers := []string{} + for manager := range m { + managers = append(managers, manager) + } + + // Print conflicts by sorted managers. 
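// Illustrative sketch, not part of the vendored sources: how Conflict values
// (above) render as errors, individually and grouped by manager. The manager
// names and paths are made up.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

func main() {
	one := merge.Conflict{
		Manager: "kube-controller-manager",
		Path:    fieldpath.MakePathOrDie("spec", "replicas"),
	}
	// conflict with "kube-controller-manager": .spec.replicas
	fmt.Println(one.Error())

	many := merge.Conflicts{
		one,
		{Manager: "kubectl", Path: fieldpath.MakePathOrDie("spec", "paused")},
	}
	// Multiple conflicts are grouped and sorted by manager:
	//   conflicts with "kube-controller-manager":
	//   - .spec.replicas
	//   conflicts with "kubectl":
	//   - .spec.paused
	fmt.Println(many.Error())
}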
+ sort.Strings(managers) + + messages := []string{} + for _, manager := range managers { + messages = append(messages, fmt.Sprintf("conflicts with %q:", manager)) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return strings.Join(messages, "\n") +} + +// Equals returns true if the lists of conflicts are the same. +func (c Conflicts) Equals(c2 Conflicts) bool { + if len(c) != len(c2) { + return false + } + for i := range c { + if !c[i].Equals(c2[i]) { + return false + } + } + return true +} + +// ToSet aggregates conflicts for all managers into a single Set. +func (c Conflicts) ToSet() *fieldpath.Set { + set := fieldpath.NewSet() + for _, conflict := range []Conflict(c) { + set.Insert(conflict.Path) + } + return set +} + +// ConflictsFromManagers creates a list of conflicts given Managers sets. +func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { + conflicts := []Conflict{} + + for manager, set := range sets { + set.Set().Iterate(func(p fieldpath.Path) { + conflicts = append(conflicts, Conflict{ + Manager: manager, + Path: p, + }) + }) + } + + return conflicts +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go new file mode 100644 index 0000000000..73e0ec73f9 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -0,0 +1,329 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// Converter is an interface to the conversion logic. The converter +// needs to be able to convert objects from one version to another. +type Converter interface { + Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) + IsMissingVersionError(error) bool +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + Converter Converter + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + enableUnions bool +} + +// EnableUnionFeature turns on union handling. It is disabled by default until the +// feature is complete. 
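// Illustrative sketch, not part of the vendored sources: the smallest useful
// Converter for an object that only exists in one version, suitable for
// plugging into the Updater above. Real callers (the apiserver, for example)
// implement this interface with their own conversion machinery; the type and
// version names here are made up.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// sameVersionConverter refuses to convert to anything but its own version.
type sameVersionConverter struct {
	version fieldpath.APIVersion
}

type missingVersionError struct{ v fieldpath.APIVersion }

func (e missingVersionError) Error() string { return fmt.Sprintf("no such version %q", e.v) }

func (c sameVersionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
	if version != c.version {
		return nil, missingVersionError{version}
	}
	return object, nil
}

func (c sameVersionConverter) IsMissingVersionError(err error) bool {
	_, ok := err.(missingVersionError)
	return ok
}

var _ merge.Converter = sameVersionConverter{}

func main() {
	updater := &merge.Updater{Converter: sameVersionConverter{version: "v1"}}
	_ = updater // updater.Update / updater.Apply can now be called with "v1" objects.
}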
+func (s *Updater) EnableUnionFeature() { + s.enableUnions = true +} + +func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { + conflicts := fieldpath.ManagedFields{} + removed := fieldpath.ManagedFields{} + compare, err := oldObject.Compare(newObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + + versions := map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + + for manager, managerSet := range managers { + if manager == workflow { + continue + } + compare, ok := versions[managerSet.APIVersion()] + if !ok { + var err error + versionedOldObject, err := s.Converter.Convert(oldObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert old object: %v", err) + } + versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert new object: %v", err) + } + compare, err = versionedOldObject.Compare(versionedNewObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } + + conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) + if !conflictSet.Empty() { + conflicts[manager] = fieldpath.NewVersionedSet(conflictSet, managerSet.APIVersion(), false) + } + + if !compare.Removed.Empty() { + removed[manager] = fieldpath.NewVersionedSet(compare.Removed, managerSet.APIVersion(), false) + } + } + + if !force && len(conflicts) != 0 { + return nil, nil, ConflictsFromManagers(conflicts) + } + + for manager, conflictSet := range conflicts { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(conflictSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager, removedSet := range removed { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(removedSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager := range managers { + if managers[manager].Set().Empty() { + delete(managers, manager) + } + } + + return managers, compare, nil +} + +// Update is the method you should call once you've merged your final +// object on CREATE/UPDATE/PATCH verbs. newObject must be the object +// that you intend to persist (after applying the patch if this is for a +// PATCH call), and liveObject must be the original object (empty if +// this is a CREATE call). 
+func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + newObject, err = liveObject.NormalizeUnions(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if _, ok := managers[manager]; !ok { + managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) + } + + ignored := s.IgnoredFields[version] + if ignored == nil { + ignored = fieldpath.NewSet() + } + managers[manager] = fieldpath.NewVersionedSet( + managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + version, + false, + ) + if managers[manager].Set().Empty() { + delete(managers, manager) + } + return newObject, managers, nil +} + +// Apply should be called when Apply is run, given the current object as +// well as the configuration that is applied. This will merge the object +// and return it. If the object hasn't changed, nil is returned (the +// managers can still have changed though). +func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + configObject, err = configObject.NormalizeUnionsApply(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + newObject, err := liveObject.Merge(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) + } + if s.enableUnions { + newObject, err = configObject.NormalizeUnionsApply(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + lastSet := managers[manager] + set, err := configObject.ToFieldSet() + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) + } + + ignored := s.IgnoredFields[version] + if ignored != nil { + set = set.RecursiveDifference(ignored) + // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? 
+ if lastSet != nil { + lastSet.Set().RecursiveDifference(ignored) + } + } + managers[manager] = fieldpath.NewVersionedSet(set, version, true) + newObject, err = s.prune(newObject, managers, manager, lastSet) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if compare.IsSame() { + newObject = nil + } + return newObject, managers, nil +} + +// prune will remove a field, list or map item, iff: +// * applyingManager applied it last time +// * applyingManager didn't apply it this time +// * no other applier claims to manage it +func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFields, applyingManager string, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + if lastSet == nil || lastSet.Set().Empty() { + return merged, nil + } + convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert merged object to last applied version: %v", err) + } + + pruned := convertedMerged.RemoveItems(lastSet.Set()) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + if err != nil { + return nil, fmt.Errorf("failed add back owned items: %v", err) + } + pruned, err = s.addBackDanglingItems(convertedMerged, pruned, lastSet) + if err != nil { + return nil, fmt.Errorf("failed add back dangling items: %v", err) + } + return s.Converter.Convert(pruned, managers[applyingManager].APIVersion()) +} + +// addBackOwnedItems adds back any fields, list and map items that were removed by prune, +// but other appliers or updaters (or the current applier's new config) claim to own. +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { + var err error + managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} + for _, managerSet := range managedFields { + if _, ok := managedAtVersion[managerSet.APIVersion()]; !ok { + managedAtVersion[managerSet.APIVersion()] = fieldpath.NewSet() + } + managedAtVersion[managerSet.APIVersion()] = managedAtVersion[managerSet.APIVersion()].Union(managerSet.Set()) + } + for version, managed := range managedAtVersion { + merged, err = s.Converter.Convert(merged, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + continue + } + return nil, fmt.Errorf("failed to convert merged object at version %v: %v", version, err) + } + pruned, err = s.Converter.Convert(pruned, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + continue + } + return nil, fmt.Errorf("failed to convert pruned object at version %v: %v", version, err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object at version %v: %v", version, err) + } + prunedSet, err := pruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object at version %v: %v", version, err) + } + pruned = merged.RemoveItems(mergedSet.Difference(prunedSet.Union(managed))) + } + return pruned, nil +} + +// addBackDanglingItems makes sure that the fields list and map items removed by prune were +// previously owned by the currently applying manager. 
This will add back fields list and map items +// that are unowned or that are owned by Updaters and shouldn't be removed. +func (s *Updater) addBackDanglingItems(merged, pruned *typed.TypedValue, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + convertedPruned, err := s.Converter.Convert(pruned, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert pruned object to last applied version: %v", err) + } + prunedSet, err := convertedPruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object in last applied version: %v", err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object in last applied version: %v", err) + } + return merged.RemoveItems(mergedSet.Difference(prunedSet).Intersection(lastSet.Set())), nil +} + +// reconcileManagedFieldsWithSchemaChanges reconciles the managed fields with any changes to the +// object's schema since the managed fields were written. +// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func (s *Updater) reconcileManagedFieldsWithSchemaChanges(liveObject *typed.TypedValue, managers fieldpath.ManagedFields) (fieldpath.ManagedFields, error) { + result := fieldpath.ManagedFields{} + for manager, versionedSet := range managers { + tv, err := s.Converter.Convert(liveObject, versionedSet.APIVersion()) + if s.Converter.IsMissingVersionError(err) { // okay to skip, obsolete versions will be deleted automatically anyway + continue + } + if err != nil { + return nil, err + } + reconciled, err := typed.ReconcileFieldSetWithSchema(versionedSet.Set(), tv) + if err != nil { + return nil, err + } + if reconciled != nil { + result[manager] = fieldpath.NewVersionedSet(reconciled, versionedSet.APIVersion(), versionedSet.Applied()) + } else { + result[manager] = versionedSet + } + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go new file mode 100644 index 0000000000..9081ccbc73 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package schema defines a targeted schema language which allows one to +// represent all the schema information necessary to perform "structured" +// merges and diffs. +// +// Due to the targeted nature of the data model, the schema language can fit in +// just a few hundred lines of go code, making it much more understandable and +// concise than e.g. OpenAPI. +// +// This schema was derived by observing the API objects used by Kubernetes, and +// formalizing a model which allows certain operations ("apply") to be more +// well defined. It is currently missing one feature: one-of ("unions"). 
+package schema diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go new file mode 100644 index 0000000000..01103b38a9 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -0,0 +1,261 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import "sync" + +// Schema is a list of named types. +// +// Schema types are indexed in a map before the first search so this type +// should be considered immutable. +type Schema struct { + Types []TypeDef `yaml:"types,omitempty"` + + once sync.Once + m map[string]TypeDef +} + +// A TypeSpecifier references a particular type in a schema. +type TypeSpecifier struct { + Type TypeRef `yaml:"type,omitempty"` + Schema Schema `yaml:"schema,omitempty"` +} + +// TypeDef represents a named type in a schema. +type TypeDef struct { + // Top level types should be named. Every type must have a unique name. + Name string `yaml:"name,omitempty"` + + Atom `yaml:"atom,omitempty,inline"` +} + +// TypeRef either refers to a named type or declares an inlined type. +type TypeRef struct { + // Either the name or one member of Atom should be set. + NamedType *string `yaml:"namedType,omitempty"` + Inlined Atom `yaml:",inline,omitempty"` +} + +// Atom represents the smallest possible pieces of the type system. +// Each set field in the Atom represents a possible type for the object. +// If none of the fields are set, any object will fail validation against the atom. +type Atom struct { + *Scalar `yaml:"scalar,omitempty"` + *List `yaml:"list,omitempty"` + *Map `yaml:"map,omitempty"` +} + +// Scalar (AKA "primitive") represents a type which has a single value which is +// either numeric, string, or boolean. +// +// TODO: split numeric into float/int? Something even more fine-grained? +type Scalar string + +const ( + Numeric = Scalar("numeric") + String = Scalar("string") + Boolean = Scalar("boolean") +) + +// ElementRelationship is an enum of the different possible relationships +// between the elements of container types (maps, lists). +type ElementRelationship string + +const ( + // Associative only applies to lists (see the documentation there). + Associative = ElementRelationship("associative") + // Atomic makes container types (lists, maps) behave + // as scalars / leaf fields + Atomic = ElementRelationship("atomic") + // Separable means the items of the container type have no particular + // relationship (default behavior for maps). + Separable = ElementRelationship("separable") +) + +// Map is a key-value pair. Its default semantics are the same as an +// associative list, but: +// * It is serialized differently: +// map: {"k": {"value": "v"}} +// list: [{"key": "k", "value": "v"}] +// * Keys must be string typed. +// * Keys can't have multiple components. 
+// +// Optionally, maps may be atomic (for example, imagine representing an RGB +// color value--it doesn't make sense to have different actors own the R and G +// values). +// +// Maps may also represent a type which is composed of a number of different fields. +// Each field has a name and a type. +// +// Fields are indexed in a map before the first search so this type +// should be considered immutable. +type Map struct { + // Each struct field appears exactly once in this list. The order in + // this list defines the canonical field ordering. + Fields []StructField `yaml:"fields,omitempty"` + + // A Union is a grouping of fields with special rules. It may refer to + // one or more fields in the above list. A given field from the above + // list may be referenced in exactly 0 or 1 places in the below list. + // One can have multiple unions in the same struct, but the fields can't + // overlap between unions. + Unions []Union `yaml:"unions,omitempty"` + + // ElementType is the type of the structs's unknown fields. + ElementType TypeRef `yaml:"elementType,omitempty"` + + // ElementRelationship states the relationship between the map's items. + // * `separable` (or unset) implies that each element is 100% independent. + // * `atomic` implies that all elements depend on each other, and this + // is effectively a scalar / leaf field; it doesn't make sense for + // separate actors to set the elements. Example: an RGB color struct; + // it would never make sense to "own" only one component of the + // color. + // The default behavior for maps is `separable`; it's permitted to + // leave this unset to get the default behavior. + ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` + + once sync.Once + m map[string]StructField +} + +// FindField is a convenience function that returns the referenced StructField, +// if it exists, or (nil, false) if it doesn't. +func (m *Map) FindField(name string) (StructField, bool) { + m.once.Do(func() { + m.m = make(map[string]StructField, len(m.Fields)) + for _, field := range m.Fields { + m.m[field.Name] = field + } + }) + sf, ok := m.m[name] + return sf, ok +} + +// UnionFields are mapping between the fields that are part of the union and +// their discriminated value. The discriminated value has to be set, and +// should not conflict with other discriminated value in the list. +type UnionField struct { + // FieldName is the name of the field that is part of the union. This + // is the serialized form of the field. + FieldName string `yaml:"fieldName"` + // Discriminatorvalue is the value of the discriminator to + // select that field. If the union doesn't have a discriminator, + // this field is ignored. + DiscriminatorValue string `yaml:"discriminatorValue"` +} + +// Union, or oneof, means that only one of multiple fields of a structure can be +// set at a time. Setting the discriminator helps clearing oher fields: +// - If discriminator changed to non-nil, and a new field has been added +// that doesn't match, an error is returned, +// - If discriminator hasn't changed and two fields or more are set, an +// error is returned, +// - If discriminator changed to non-nil, all other fields but the +// discriminated one will be cleared, +// - Otherwise, If only one field is left, update discriminator to that value. +type Union struct { + // Discriminator, if present, is the name of the field that + // discriminates fields in the union. 
The mapping between the value of + // the discriminator and the field is done by using the Fields list + // below. + Discriminator *string `yaml:"discriminator,omitempty"` + + // DeduceInvalidDiscriminator indicates if the discriminator + // should be updated automatically based on the fields set. This + // typically defaults to false since we don't want to deduce by + // default (the behavior exists to maintain compatibility on + // existing types and shouldn't be used for new types). + DeduceInvalidDiscriminator bool `yaml:"deduceInvalidDiscriminator,omitempty"` + + // This is the list of fields that belong to this union. All the + // fields present in here have to be part of the parent + // structure. Discriminator (if oneOf has one), is NOT included in + // this list. The value for field is how we map the name of the field + // to actual value for discriminator. + Fields []UnionField `yaml:"fields,omitempty"` +} + +// StructField pairs a field name with a field type. +type StructField struct { + // Name is the field name. + Name string `yaml:"name,omitempty"` + // Type is the field type. + Type TypeRef `yaml:"type,omitempty"` + // Default value for the field, nil if not present. + Default interface{} `yaml:"default,omitempty"` +} + +// List represents a type which contains a zero or more elements, all of the +// same subtype. Lists may be either associative: each element is more or less +// independent and could be managed by separate entities in the system; or +// atomic, where the elements are heavily dependent on each other: it is not +// sensible to change one element without considering the ramifications on all +// the other elements. +type List struct { + // ElementType is the type of the list's elements. + ElementType TypeRef `yaml:"elementType,omitempty"` + + // ElementRelationship states the relationship between the list's elements + // and must have one of these values: + // * `atomic`: the list is treated as a single entity, like a scalar. + // * `associative`: + // - If the list element is a scalar, the list is treated as a set. + // - If the list element is a map, the list is treated as a map. + // There is no default for this value for lists; all schemas must + // explicitly state the element relationship for all lists. + ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` + + // Iff ElementRelationship is `associative`, and the element type is + // map, then Keys must have non-zero length, and it lists the fields + // of the element's map type which are to be used as the keys of the + // list. + // + // TODO: change this to "non-atomic struct" above and make the code reflect this. + // + // Each key must refer to a single field name (no nesting, not JSONPath). + Keys []string `yaml:"keys,omitempty"` +} + +// FindNamedType is a convenience function that returns the referenced TypeDef, +// if it exists, or (nil, false) if it doesn't. +func (s *Schema) FindNamedType(name string) (TypeDef, bool) { + s.once.Do(func() { + s.m = make(map[string]TypeDef, len(s.Types)) + for _, t := range s.Types { + s.m[t.Name] = t + } + }) + t, ok := s.m[name] + return t, ok +} + +// Resolve is a convenience function which returns the atom referenced, whether +// it is inline or named. Returns (Atom{}, false) if the type can't be resolved. +// +// This allows callers to not care about the difference between a (possibly +// inlined) reference and a definition. 
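As a minimal sketch of how the declarations above fit together (the helper and the type name "fieldName" are illustrative, not part of the vendored source): build a one-type schema and resolve a named reference to it with Resolve.

import "sigs.k8s.io/structured-merge-diff/v4/schema"

// resolveExample builds a tiny schema with one named string type and then
// resolves a TypeRef that points at it; ok is false if the name is unknown.
func resolveExample() (schema.Atom, bool) {
	str := schema.String
	name := "fieldName"
	s := &schema.Schema{
		Types: []schema.TypeDef{{
			Name: name,
			Atom: schema.Atom{Scalar: &str},
		}},
	}
	return s.Resolve(schema.TypeRef{NamedType: &name})
}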
+func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { + if tr.NamedType != nil { + t, ok := s.FindNamedType(*tr.NamedType) + if !ok { + return Atom{}, false + } + return t.Atom, true + } + return tr.Inlined, true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go new file mode 100644 index 0000000000..4c303eecc5 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go @@ -0,0 +1,199 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import "reflect" + +// Equals returns true iff the two Schemas are equal. +func (a *Schema) Equals(b *Schema) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + + if len(a.Types) != len(b.Types) { + return false + } + for i := range a.Types { + if !a.Types[i].Equals(&b.Types[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two TypeRefs are equal. +// +// Note that two typerefs that have an equivalent type but where one is +// inlined and the other is named, are not considered equal. +func (a *TypeRef) Equals(b *TypeRef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.NamedType == nil) != (b.NamedType == nil) { + return false + } + if a.NamedType != nil { + if *a.NamedType != *b.NamedType { + return false + } + //return true + } + return a.Inlined.Equals(&b.Inlined) +} + +// Equals returns true iff the two TypeDefs are equal. +func (a *TypeDef) Equals(b *TypeDef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + return a.Atom.Equals(&b.Atom) +} + +// Equals returns true iff the two Atoms are equal. +func (a *Atom) Equals(b *Atom) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Scalar == nil) != (b.Scalar == nil) { + return false + } + if (a.List == nil) != (b.List == nil) { + return false + } + if (a.Map == nil) != (b.Map == nil) { + return false + } + switch { + case a.Scalar != nil: + return *a.Scalar == *b.Scalar + case a.List != nil: + return a.List.Equals(b.List) + case a.Map != nil: + return a.Map.Equals(b.Map) + } + return true +} + +// Equals returns true iff the two Maps are equal. +func (a *Map) Equals(b *Map) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + if len(a.Unions) != len(b.Unions) { + return false + } + for i := range a.Unions { + if !a.Unions[i].Equals(&b.Unions[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two Unions are equal. 
+func (a *Union) Equals(b *Union) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Discriminator == nil) != (b.Discriminator == nil) { + return false + } + if a.Discriminator != nil { + if *a.Discriminator != *b.Discriminator { + return false + } + } + if a.DeduceInvalidDiscriminator != b.DeduceInvalidDiscriminator { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two UnionFields are equal. +func (a *UnionField) Equals(b *UnionField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.FieldName != b.FieldName { + return false + } + if a.DiscriminatorValue != b.DiscriminatorValue { + return false + } + return true +} + +// Equals returns true iff the two StructFields are equal. +func (a *StructField) Equals(b *StructField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + if !reflect.DeepEqual(a.Default, b.Default) { + return false + } + return a.Type.Equals(&b.Type) +} + +// Equals returns true iff the two Lists are equal. +func (a *List) Equals(b *List) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Keys) != len(b.Keys) { + return false + } + for i := range a.Keys { + if a.Keys[i] != b.Keys[i] { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go new file mode 100644 index 0000000000..bb60e2a5fd --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -0,0 +1,161 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// SchemaSchemaYAML is a schema against which you can validate other schemas. +// It will validate itself. It can be unmarshalled into a Schema type. 
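As the comment above notes, the schema-schema can be unmarshalled into an ordinary Schema value; a minimal sketch (the helper name is illustrative, and it relies on the same gopkg.in/yaml.v2 package the typed parser uses):

import (
	yaml "gopkg.in/yaml.v2"

	"sigs.k8s.io/structured-merge-diff/v4/schema"
)

// loadSchemaSchema parses SchemaSchemaYAML into a Schema value.
func loadSchemaSchema() (*schema.Schema, error) {
	s := &schema.Schema{}
	if err := yaml.Unmarshal([]byte(schema.SchemaSchemaYAML), s); err != nil {
		return nil, err
	}
	return s, nil
}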
+var SchemaSchemaYAML = `types: +- name: schema + map: + fields: + - name: types + type: + list: + elementRelationship: associative + elementType: + namedType: typeDef + keys: + - name +- name: typeDef + map: + fields: + - name: name + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped +- name: typeRef + map: + fields: + - name: namedType + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped +- name: scalar + scalar: string +- name: map + map: + fields: + - name: fields + type: + list: + elementType: + namedType: structField + elementRelationship: associative + keys: [ "name" ] + - name: unions + type: + list: + elementType: + namedType: union + elementRelationship: atomic + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string +- name: unionField + map: + fields: + - name: fieldName + type: + scalar: string + - name: discriminatorValue + type: + scalar: string +- name: union + map: + fields: + - name: discriminator + type: + scalar: string + - name: deduceInvalidDiscriminator + type: + scalar: bool + - name: fields + type: + list: + elementRelationship: associative + elementType: + namedType: unionField + keys: + - fieldName +- name: structField + map: + fields: + - name: name + type: + scalar: string + - name: type + type: + namedType: typeRef + - name: default + type: + namedType: __untyped_atomic_ +- name: list + map: + fields: + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string + - name: keys + type: + list: + elementType: + scalar: string +- name: untyped + map: + fields: + - name: elementRelationship + type: + scalar: string +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +` diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go new file mode 100644 index 0000000000..ca4e60542a --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package typed contains logic for operating on values with given schemas. +package typed diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go new file mode 100644 index 0000000000..6b2b2cb4a1 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -0,0 +1,256 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "errors" + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// ValidationError reports an error about a particular field +type ValidationError struct { + Path string + ErrorMessage string +} + +// Error returns a human readable error message. +func (ve ValidationError) Error() string { + if len(ve.Path) == 0 { + return ve.ErrorMessage + } + return fmt.Sprintf("%s: %v", ve.Path, ve.ErrorMessage) +} + +// ValidationErrors accumulates multiple validation error messages. +type ValidationErrors []ValidationError + +// Error returns a human readable error message reporting each error in the +// list. +func (errs ValidationErrors) Error() string { + if len(errs) == 1 { + return errs[0].Error() + } + messages := []string{"errors:"} + for _, e := range errs { + messages = append(messages, " "+e.Error()) + } + return strings.Join(messages, "\n") +} + +// Set the given path to all the validation errors. +func (errs ValidationErrors) WithPath(p string) ValidationErrors { + for i := range errs { + errs[i].Path = p + } + return errs +} + +// WithPrefix prefixes all errors path with the given pathelement. This +// is useful when unwinding the stack on errors. +func (errs ValidationErrors) WithPrefix(prefix string) ValidationErrors { + for i := range errs { + errs[i].Path = prefix + errs[i].Path + } + return errs +} + +// WithLazyPrefix prefixes all errors path with the given pathelement. +// This is useful when unwinding the stack on errors. Prefix is +// computed lazily only if there is an error. +func (errs ValidationErrors) WithLazyPrefix(fn func() string) ValidationErrors { + if len(errs) == 0 { + return errs + } + prefix := "" + if fn != nil { + prefix = fn() + } + for i := range errs { + errs[i].Path = prefix + errs[i].Path + } + return errs +} + +func errorf(format string, args ...interface{}) ValidationErrors { + return ValidationErrors{{ + ErrorMessage: fmt.Sprintf(format, args...), + }} +} + +type atomHandler interface { + doScalar(*schema.Scalar) ValidationErrors + doList(*schema.List) ValidationErrors + doMap(*schema.Map) ValidationErrors +} + +func resolveSchema(s *schema.Schema, tr schema.TypeRef, v value.Value, ah atomHandler) ValidationErrors { + a, ok := s.Resolve(tr) + if !ok { + return errorf("schema error: no type found matching: %v", *tr.NamedType) + } + + a = deduceAtom(a, v) + return handleAtom(a, tr, ah) +} + +// deduceAtom determines which of the possible types in atom 'atom' applies to value 'val'. +// If val is of a type allowed by atom, return a copy of atom with all other types set to nil. +// if val is nil, or is not of a type allowed by atom, just return the original atom, +// and validation will fail at a later stage. 
(with a more useful error) +func deduceAtom(atom schema.Atom, val value.Value) schema.Atom { + switch { + case val == nil: + case val.IsFloat(), val.IsInt(), val.IsString(), val.IsBool(): + if atom.Scalar != nil { + return schema.Atom{Scalar: atom.Scalar} + } + case val.IsList(): + if atom.List != nil { + return schema.Atom{List: atom.List} + } + case val.IsMap(): + if atom.Map != nil { + return schema.Atom{Map: atom.Map} + } + } + return atom +} + +func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErrors { + switch { + case a.Map != nil: + return ah.doMap(a.Map) + case a.Scalar != nil: + return ah.doScalar(a.Scalar) + case a.List != nil: + return ah.doList(a.List) + } + + name := "inlined" + if tr.NamedType != nil { + name = "named type: " + *tr.NamedType + } + + return errorf("schema error: invalid atom: %v", name) +} + +// Returns the list, or an error. Reminder: nil is a valid list and might be returned. +func listValue(a value.Allocator, val value.Value) (value.List, error) { + if val.IsNull() { + // Null is a valid list. + return nil, nil + } + if !val.IsList() { + return nil, fmt.Errorf("expected list, got %v", val) + } + return val.AsListUsing(a), nil +} + +// Returns the map, or an error. Reminder: nil is a valid map and might be returned. +func mapValue(a value.Allocator, val value.Value) (value.Map, error) { + if val == nil { + return nil, fmt.Errorf("expected map, got nil") + } + if val.IsNull() { + // Null is a valid map. + return nil, nil + } + if !val.IsMap() { + return nil, fmt.Errorf("expected map, got %v", val) + } + return val.AsMapUsing(a), nil +} + +func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName string) (interface{}, error) { + atom, ok := s.Resolve(list.ElementType) + if !ok { + return nil, errors.New("invalid elementType for list") + } + if atom.Map == nil { + return nil, errors.New("associative list may not have non-map types") + } + // If the field is not found, we can assume there is no default. + field, _ := atom.Map.FindField(fieldName) + return field.Default, nil +} + +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { + pe := fieldpath.PathElement{} + if child.IsNull() { + // null entries are illegal. + return pe, errors.New("associative list with keys may not have a null element") + } + if !child.IsMap() { + return pe, errors.New("associative list with keys may not have non-map elements") + } + keyMap := value.FieldList{} + m := child.AsMapUsing(a) + defer a.Free(m) + for _, fieldName := range list.Keys { + if val, ok := m.Get(fieldName); ok { + keyMap = append(keyMap, value.Field{Name: fieldName, Value: val}) + } else if def, err := getAssociativeKeyDefault(s, list, fieldName); err != nil { + return pe, fmt.Errorf("couldn't find default value for %v: %v", fieldName, err) + } else if def != nil { + keyMap = append(keyMap, value.Field{Name: fieldName, Value: value.NewValueInterface(def)}) + } else { + return pe, fmt.Errorf("associative list with keys has an element that omits key field %q (and doesn't have default value)", fieldName) + } + } + keyMap.Sort() + pe.Key = &keyMap + return pe, nil +} + +func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { + pe := fieldpath.PathElement{} + switch { + case child.IsMap(): + // TODO: atomic maps should be acceptable. 
+ return pe, errors.New("associative list without keys has an element that's a map type") + case child.IsList(): + // Should we support a set of lists? For the moment + // let's say we don't. + // TODO: atomic lists should be acceptable. + return pe, errors.New("not supported: associative list with lists as elements") + case child.IsNull(): + return pe, errors.New("associative list without keys has an element that's an explicit null") + default: + // We are a set type. + pe.Value = &child + return pe, nil + } +} + +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship == schema.Associative { + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, index, child) + } + + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(list, index, child) + } + + // Use the index as a key for atomic lists. + return fieldpath.PathElement{Index: &index}, nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go new file mode 100644 index 0000000000..7e20f4083a --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -0,0 +1,353 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "math" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +type mergingWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are merging + path fieldpath.Path + + // How to merge. Called after schema validation for all leaf fields. + rule mergeRule + + // If set, called after non-leaf items have been merged. (`out` is + // probably already set.) + postItemHook mergeRule + + // output of the merge operation (nil if none) + out *interface{} + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*mergingWalker + + allocator value.Allocator +} + +// merge rules examine w.lhs and w.rhs (up to one of which may be nil) and +// optionally set w.out. If lhs and rhs are both set, they will be of +// comparable type. +type mergeRule func(w *mergingWalker) + +var ( + ruleKeepRHS = mergeRule(func(w *mergingWalker) { + if w.rhs != nil { + v := w.rhs.Unstructured() + w.out = &v + } else if w.lhs != nil { + v := w.lhs.Unstructured() + w.out = &v + } + }) +) + +// merge sets w.out. +func (w *mergingWalker) merge(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. 
+ return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + if alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf && w.postItemHook != nil { + w.postItemHook(w) + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *mergingWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. + w.rule(w) +} + +func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { + errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) + errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) + if len(errs) > 0 { + return errs + } + + // All scalars are leaf fields. + w.doLeaf() + + return nil +} + +func (w *mergingWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *mergingWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*mergingWalker{} + } + var w2 *mergingWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &mergingWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.out = nil + return w2 +} + +func (w *mergingWalker) finishDescent(w2 *mergingWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *mergingWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + out := make([]interface{}, 0, int(math.Max(float64(rLen), float64(lLen)))) + + // TODO: ordering is totally wrong. + // TODO: might as well make the map order work the same way. + + // This is a cheap hack to at least make the output order stable. + rhsOrder := make([]fieldpath.PathElement, 0, rLen) + + // First, collect all RHS children. + observedRHS := fieldpath.MakePathElementValueMap(rLen) + if rhs != nil { + for i := 0; i < rhs.Length(); i++ { + child := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + if err != nil { + errs = append(errs, errorf("rhs: element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if _, ok := observedRHS.Get(pe); ok { + errs = append(errs, errorf("rhs: duplicate entries for key %v", pe.String())...) + } + observedRHS.Insert(pe, child) + rhsOrder = append(rhsOrder, pe) + } + } + + // Then merge with LHS children. 
+ observedLHS := fieldpath.MakePathElementSet(lLen) + if lhs != nil { + for i := 0; i < lhs.Length(); i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + if err != nil { + errs = append(errs, errorf("lhs: element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if observedLHS.Has(pe) { + errs = append(errs, errorf("lhs: duplicate entries for key %v", pe.String())...) + continue + } + observedLHS.Insert(pe) + w2 := w.prepareDescent(pe, t.ElementType) + w2.lhs = value.Value(child) + if rchild, ok := observedRHS.Get(pe); ok { + w2.rhs = rchild + } + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out = append(out, *w2.out) + } + w.finishDescent(w2) + } + } + + for _, pe := range rhsOrder { + if observedLHS.Has(pe) { + continue + } + value, _ := observedRHS.Get(pe) + w2 := w.prepareDescent(pe, t.ElementType) + w2.rhs = value + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out = append(out, *w2.out) + } + w.finishDescent(w2) + } + + if len(out) > 0 { + i := interface{}(out) + w.out = &i + } + + return errs +} + +func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *mergingWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *mergingWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out[key] = *w2.out + } + w.finishDescent(w2) + return errs +} + +func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + if len(out) > 0 { + i := interface{}(out) + w.out = &i + } + + return errs +} + +func (w *mergingWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. 
+ emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go new file mode 100644 index 0000000000..3949a78fc6 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + + yaml "gopkg.in/yaml.v2" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// YAMLObject is an object encoded in YAML. +type YAMLObject string + +// Parser implements YAMLParser and allows introspecting the schema. +type Parser struct { + Schema schema.Schema +} + +// create builds an unvalidated parser. +func create(s YAMLObject) (*Parser, error) { + p := Parser{} + err := yaml.Unmarshal([]byte(s), &p.Schema) + return &p, err +} + +func createOrDie(schema YAMLObject) *Parser { + p, err := create(schema) + if err != nil { + panic(fmt.Errorf("failed to create parser: %v", err)) + } + return p +} + +var ssParser = createOrDie(YAMLObject(schema.SchemaSchemaYAML)) + +// NewParser will build a YAMLParser from a schema. The schema is validated. +func NewParser(schema YAMLObject) (*Parser, error) { + _, err := ssParser.Type("schema").FromYAML(schema) + if err != nil { + return nil, fmt.Errorf("unable to validate schema: %v", err) + } + p, err := create(schema) + if err != nil { + return nil, err + } + return p, nil +} + +// TypeNames returns a list of types this parser understands. +func (p *Parser) TypeNames() (names []string) { + for _, td := range p.Schema.Types { + names = append(names, td.Name) + } + return names +} + +// Type returns a helper which can produce objects of the given type. Any +// errors are deferred until a further function is called. +func (p *Parser) Type(name string) ParseableType { + return ParseableType{ + Schema: &p.Schema, + TypeRef: schema.TypeRef{NamedType: &name}, + } +} + +// ParseableType allows for easy production of typed objects. +type ParseableType struct { + TypeRef schema.TypeRef + Schema *schema.Schema +} + +// IsValid return true if p's schema and typename are valid. +func (p ParseableType) IsValid() bool { + _, ok := p.Schema.Resolve(p.TypeRef) + return ok +} + +// FromYAML parses a yaml string into an object with the current schema +// and the type "typename" or an error if validation fails. 
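A usage sketch for the parser API above (the "person" schema, the object, and the helper name are invented for illustration, not taken from upstream): a Parser is built and validated once, then its ParseableType turns YAML into validated TypedValues.

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// personSchema is an illustrative two-field schema.
var personSchema = typed.YAMLObject(`types:
- name: person
  map:
    fields:
    - name: name
      type:
        scalar: string
    - name: age
      type:
        scalar: numeric
`)

// parsePerson validates the schema, then parses and validates one object
// against the "person" type.
func parsePerson() (*typed.TypedValue, error) {
	parser, err := typed.NewParser(personSchema)
	if err != nil {
		return nil, fmt.Errorf("invalid schema: %v", err)
	}
	return parser.Type("person").FromYAML(`{"name": "jane", "age": 36}`)
}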
+func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { + var v interface{} + err := yaml.Unmarshal([]byte(object), &v) + if err != nil { + return nil, err + } + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) +} + +// FromUnstructured converts a go "interface{}" type, typically an +// unstructured object in Kubernetes world, to a TypedValue. It returns an +// error if the resulting object fails schema validation. +// The provided interface{} must be one of: map[string]interface{}, +// map[interface{}]interface{}, []interface{}, int types, float types, +// string or boolean. Nested interface{} must also be one of these types. +func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +} + +// FromStructured converts a go "interface{}" type, typically an structured object in +// Kubernetes, to a TypedValue. It will return an error if the resulting object fails +// schema validation. The provided "interface{}" value must be a pointer so that the +// value can be modified via reflection. The provided "interface{}" may contain structs +// and types that are converted to Values by the jsonMarshaler interface. +func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { + v, err := value.NewValueReflect(in) + if err != nil { + return nil, fmt.Errorf("error creating struct value reflector: %v", err) + } + return AsTyped(v, p.Schema, p.TypeRef) +} + +// DeducedParseableType is a ParseableType that deduces the type from +// the content of the object. +var DeducedParseableType ParseableType = createOrDie(YAMLObject(`types: +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`)).Type("__untyped_deduced_") diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go new file mode 100644 index 0000000000..5a8214ae2d --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go @@ -0,0 +1,295 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package typed + +import ( + "fmt" + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +var fmPool = sync.Pool{ + New: func() interface{} { return &reconcileWithSchemaWalker{} }, +} + +func (v *reconcileWithSchemaWalker) finished() { + v.fieldSet = nil + v.schema = nil + v.value = nil + v.typeRef = schema.TypeRef{} + v.path = nil + v.toRemove = nil + v.toAdd = nil + fmPool.Put(v) +} + +type reconcileWithSchemaWalker struct { + value *TypedValue // root of the live object + schema *schema.Schema // root of the live schema + + // state of node being visited by walker + fieldSet *fieldpath.Set + typeRef schema.TypeRef + path fieldpath.Path + isAtomic bool + + // the accumulated diff to perform to apply reconciliation + toRemove *fieldpath.Set // paths to remove recursively + toAdd *fieldpath.Set // paths to add after any removals + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*reconcileWithSchemaWalker +} + +func (v *reconcileWithSchemaWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *reconcileWithSchemaWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*reconcileWithSchemaWalker{} + } + var v2 *reconcileWithSchemaWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &reconcileWithSchemaWalker{} + } + *v2 = *v + v2.typeRef = tr + v2.path = append(v.path, pe) + v2.value = v.value + return v2 +} + +func (v *reconcileWithSchemaWalker) finishDescent(v2 *reconcileWithSchemaWalker) { + v2.fieldSet = nil + v2.schema = nil + v2.value = nil + v2.typeRef = schema.TypeRef{} + if cap(v2.path) < 20 { // recycle slices that do not have unexpectedly high capacity + v2.path = v2.path[:0] + } else { + v2.path = nil + } + + // merge any accumulated changes into parent walker + if v2.toRemove != nil { + if v.toRemove == nil { + v.toRemove = v2.toRemove + } else { + v.toRemove = v.toRemove.Union(v2.toRemove) + } + } + if v2.toAdd != nil { + if v.toAdd == nil { + v.toAdd = v2.toAdd + } else { + v.toAdd = v.toAdd.Union(v2.toAdd) + } + } + v2.toRemove = nil + v2.toAdd = nil + + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + *v.spareWalkers = append(*v.spareWalkers, v2) +} + +// ReconcileFieldSetWithSchema reconciles the a field set with any changes to the +//// object's schema since the field set was written. Returns the reconciled field set, or nil of +// no changes were made to the field set. +// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func ReconcileFieldSetWithSchema(fieldset *fieldpath.Set, tv *TypedValue) (*fieldpath.Set, error) { + v := fmPool.Get().(*reconcileWithSchemaWalker) + v.fieldSet = fieldset + v.value = tv + + v.schema = tv.schema + v.typeRef = tv.typeRef + + // We don't reconcile deduced types, which are primarily for use by unstructured CRDs. Deduced + // types do not support atomic or granular tags. Nor does the dynamic schema deduction + // interact well with the reconcile logic. 
+ if v.schema == DeducedParseableType.Schema { + return nil, nil + } + + defer v.finished() + errs := v.reconcile() + + if len(errs) > 0 { + return nil, fmt.Errorf("errors reconciling field set with schema: %s", errs.Error()) + } + + // If there are any accumulated changes, apply them + if v.toAdd != nil || v.toRemove != nil { + out := v.fieldSet + if v.toRemove != nil { + out = out.RecursiveDifference(v.toRemove) + } + if v.toAdd != nil { + out = out.Union(v.toAdd) + } + return out, nil + } + return nil, nil +} + +func (v *reconcileWithSchemaWalker) reconcile() (errs ValidationErrors) { + a, ok := v.schema.Resolve(v.typeRef) + if !ok { + errs = append(errs, errorf("could not resolve %v", v.typeRef)...) + return + } + return handleAtom(a, v.typeRef, v) +} + +func (v *reconcileWithSchemaWalker) doScalar(_ *schema.Scalar) (errs ValidationErrors) { + return errs +} + +func (v *reconcileWithSchemaWalker) visitListItems(t *schema.List, element *fieldpath.Set) (errs ValidationErrors) { + handleElement := func(pe fieldpath.PathElement, isMember bool) { + var hasChildren bool + v2 := v.prepareDescent(pe, t.ElementType) + v2.fieldSet, hasChildren = element.Children.Get(pe) + v2.isAtomic = isMember && !hasChildren + errs = append(errs, v2.reconcile()...) + v.finishDescent(v2) + } + element.Children.Iterate(func(pe fieldpath.PathElement) { + if element.Members.Has(pe) { + return + } + handleElement(pe, false) + }) + element.Members.Iterate(func(pe fieldpath.PathElement) { + handleElement(pe, true) + }) + return errs +} + +func (v *reconcileWithSchemaWalker) doList(t *schema.List) (errs ValidationErrors) { + // reconcile lists changed from granular to atomic + if !v.isAtomic && t.ElementRelationship == schema.Atomic { + v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields + v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic + return errs + } + // reconcile lists changed from atomic to granular + if v.isAtomic && t.ElementRelationship == schema.Associative { + v.toAdd, errs = buildGranularFieldSet(v.path, v.value) + if errs != nil { + return errs + } + } + if v.fieldSet != nil { + errs = v.visitListItems(t, v.fieldSet) + } + return errs +} + +func (v *reconcileWithSchemaWalker) visitMapItems(t *schema.Map, element *fieldpath.Set) (errs ValidationErrors) { + handleElement := func(pe fieldpath.PathElement, isMember bool) { + var hasChildren bool + if tr, ok := typeRefAtPath(t, pe); ok { // ignore fields not in the schema + v2 := v.prepareDescent(pe, tr) + v2.fieldSet, hasChildren = element.Children.Get(pe) + v2.isAtomic = isMember && !hasChildren + errs = append(errs, v2.reconcile()...) 
+ v.finishDescent(v2) + } + } + element.Children.Iterate(func(pe fieldpath.PathElement) { + if element.Members.Has(pe) { + return + } + handleElement(pe, false) + }) + element.Members.Iterate(func(pe fieldpath.PathElement) { + handleElement(pe, true) + }) + + return errs +} + +func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) { + // reconcile maps and structs changed from granular to atomic + if !v.isAtomic && t.ElementRelationship == schema.Atomic { + if v.fieldSet != nil && v.fieldSet.Size() > 0 { + v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields + v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic + } + return errs + } + // reconcile maps changed from atomic to granular + if v.isAtomic && (t.ElementRelationship == schema.Separable || t.ElementRelationship == "") { + v.toAdd, errs = buildGranularFieldSet(v.path, v.value) + if errs != nil { + return errs + } + } + if v.fieldSet != nil { + errs = v.visitMapItems(t, v.fieldSet) + } + return errs +} + +func buildGranularFieldSet(path fieldpath.Path, value *TypedValue) (*fieldpath.Set, ValidationErrors) { + + valueFieldSet, err := value.ToFieldSet() + if err != nil { + return nil, errorf("toFieldSet: %v", err) + } + if valueFieldSetAtPath, ok := fieldSetAtPath(valueFieldSet, path); ok { + result := fieldpath.NewSet(path) + resultAtPath := descendToPath(result, path) + *resultAtPath = *valueFieldSetAtPath + return result, nil + } + return nil, nil +} + +func fieldSetAtPath(node *fieldpath.Set, path fieldpath.Path) (*fieldpath.Set, bool) { + ok := true + for _, pe := range path { + if node, ok = node.Children.Get(pe); !ok { + break + } + } + return node, ok +} + +func descendToPath(node *fieldpath.Set, path fieldpath.Path) *fieldpath.Set { + for _, pe := range path { + node = node.Children.Descend(pe) + } + return node +} + +func typeRefAtPath(t *schema.Map, pe fieldpath.PathElement) (schema.TypeRef, bool) { + tr := t.ElementType + if pe.FieldName != nil { + if sf, ok := t.FindField(*pe.FieldName); ok { + tr = sf.Type + } + } + return tr, tr != schema.TypeRef{} +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go new file mode 100644 index 0000000000..a20fc16f9b --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -0,0 +1,112 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package typed + +import ( + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +type removingWalker struct { + value value.Value + out interface{} + schema *schema.Schema + toRemove *fieldpath.Set + allocator value.Allocator +} + +func removeItemsWithSchema(val value.Value, toRemove *fieldpath.Set, schema *schema.Schema, typeRef schema.TypeRef) value.Value { + w := &removingWalker{ + value: val, + schema: schema, + toRemove: toRemove, + allocator: value.NewFreelistAllocator(), + } + resolveSchema(schema, typeRef, val, w) + return value.NewValueInterface(w.out) +} + +func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { + w.out = w.value.Unstructured() + return nil +} + +func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) + // If list is null, empty, or atomic just return + if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic { + return nil + } + + var newItems []interface{} + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + i, item := iter.Item() + // Ignore error because we have already validated this list + pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + path, _ := fieldpath.MakePath(pe) + if w.toRemove.Has(path) { + continue + } + if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + item = removeItemsWithSchema(item, subset, w.schema, t.ElementType) + } + newItems = append(newItems, item.Unstructured()) + } + if len(newItems) > 0 { + w.out = newItems + } + return nil +} + +func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { + m := w.value.AsMapUsing(w.allocator) + if m != nil { + defer w.allocator.Free(m) + } + // If map is null, empty, or atomic just return + if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic { + return nil + } + + fieldTypes := map[string]schema.TypeRef{} + for _, structField := range t.Fields { + fieldTypes[structField.Name] = structField.Type + } + + newMap := map[string]interface{}{} + m.Iterate(func(k string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &k} + path, _ := fieldpath.MakePath(pe) + fieldType := t.ElementType + if ft, ok := fieldTypes[k]; ok { + fieldType = ft + } + if w.toRemove.Has(path) { + return true + } + if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + val = removeItemsWithSchema(val, subset, w.schema, fieldType) + } + newMap[k] = val.Unstructured() + return true + }) + if len(newMap) > 0 { + w.out = newMap + } + return nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go new file mode 100644 index 0000000000..bc88c086c4 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -0,0 +1,166 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package typed + +import ( + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +var tPool = sync.Pool{ + New: func() interface{} { return &toFieldSetWalker{} }, +} + +func (tv TypedValue) toFieldSetWalker() *toFieldSetWalker { + v := tPool.Get().(*toFieldSetWalker) + v.value = tv.value + v.schema = tv.schema + v.typeRef = tv.typeRef + v.set = &fieldpath.Set{} + v.allocator = value.NewFreelistAllocator() + return v +} + +func (v *toFieldSetWalker) finished() { + v.schema = nil + v.typeRef = schema.TypeRef{} + v.path = nil + v.set = nil + tPool.Put(v) +} + +type toFieldSetWalker struct { + value value.Value + schema *schema.Schema + typeRef schema.TypeRef + + set *fieldpath.Set + path fieldpath.Path + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*toFieldSetWalker + allocator value.Allocator +} + +func (v *toFieldSetWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *toFieldSetWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*toFieldSetWalker{} + } + var v2 *toFieldSetWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &toFieldSetWalker{} + } + *v2 = *v + v2.typeRef = tr + v2.path = append(v2.path, pe) + return v2 +} + +func (v *toFieldSetWalker) finishDescent(v2 *toFieldSetWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + v.path = v2.path[:len(v2.path)-1] + *v.spareWalkers = append(*v.spareWalkers, v2) +} + +func (v *toFieldSetWalker) toFieldSet() ValidationErrors { + return resolveSchema(v.schema, v.typeRef, v.value, v) +} + +func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { + v.set.Insert(v.path) + + return nil +} + +func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + v2 := v.prepareDescent(pe, t.ElementType) + v2.value = child + errs = append(errs, v2.toFieldSet()...) + + v2.set.Insert(v2.path) + v.finishDescent(v2) + } + return errs +} + +func (v *toFieldSetWalker) doList(t *schema.List) (errs ValidationErrors) { + list, _ := listValue(v.allocator, v.value) + if list != nil { + defer v.allocator.Free(list) + } + if t.ElementRelationship == schema.Atomic { + v.set.Insert(v.path) + return nil + } + + if list == nil { + return nil + } + + errs = v.visitListItems(t, list) + + return errs +} + +func (v *toFieldSetWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) { + m.Iterate(func(key string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &key} + + tr := t.ElementType + if sf, ok := t.FindField(key); ok { + tr = sf.Type + } + v2 := v.prepareDescent(pe, tr) + v2.value = val + errs = append(errs, v2.toFieldSet()...) 
+ if _, ok := t.FindField(key); !ok { + v2.set.Insert(v2.path) + } + v.finishDescent(v2) + return true + }) + return errs +} + +func (v *toFieldSetWalker) doMap(t *schema.Map) (errs ValidationErrors) { + m, _ := mapValue(v.allocator, v.value) + if m != nil { + defer v.allocator.Free(m) + } + if t.ElementRelationship == schema.Atomic { + v.set.Insert(v.path) + return nil + } + + if m == nil { + return nil + } + + errs = v.visitMapItems(t, m) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go new file mode 100644 index 0000000000..54766f6edc --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -0,0 +1,315 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// AsTyped accepts a value and a type and returns a TypedValue. 'v' must have +// type 'typeName' in the schema. An error is returned if the v doesn't conform +// to the schema. +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { + tv := &TypedValue{ + value: v, + typeRef: typeRef, + schema: s, + } + if err := tv.Validate(); err != nil { + return nil, err + } + return tv, nil +} + +// AsTypeUnvalidated is just like AsTyped, but doesn't validate that the type +// conforms to the schema, for cases where that has already been checked or +// where you're going to call a method that validates as a side-effect (like +// ToFieldSet). +func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { + tv := &TypedValue{ + value: v, + typeRef: typeRef, + schema: s, + } + return tv +} + +// TypedValue is a value of some specific type. +type TypedValue struct { + value value.Value + typeRef schema.TypeRef + schema *schema.Schema +} + +// TypeRef is the type of the value. +func (tv TypedValue) TypeRef() schema.TypeRef { + return tv.typeRef +} + +// AsValue removes the type from the TypedValue and only keeps the value. +func (tv TypedValue) AsValue() value.Value { + return tv.value +} + +// Schema gets the schema from the TypedValue. +func (tv TypedValue) Schema() *schema.Schema { + return tv.schema +} + +// Validate returns an error with a list of every spec violation. +func (tv TypedValue) Validate() error { + w := tv.walker() + defer w.finished() + if errs := w.validate(nil); len(errs) != 0 { + return errs + } + return nil +} + +// ToFieldSet creates a set containing every leaf field and item mentioned, or +// validation errors, if any were encountered. 
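A small sketch of how ToFieldSet might be consumed (the helper name is illustrative; tv is assumed to be a valid TypedValue obtained elsewhere, e.g. from ParseableType.FromYAML):

import "sigs.k8s.io/structured-merge-diff/v4/typed"

// countLeafFields reports how many leaf fields and list items tv mentions,
// or the validation errors encountered while walking it.
func countLeafFields(tv *typed.TypedValue) (int, error) {
	set, err := tv.ToFieldSet()
	if err != nil {
		return 0, err
	}
	return set.Size(), nil
}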
+func (tv TypedValue) ToFieldSet() (*fieldpath.Set, error) { + w := tv.toFieldSetWalker() + defer w.finished() + if errs := w.toFieldSet(); len(errs) != 0 { + return nil, errs + } + return w.set, nil +} + +// Merge returns the result of merging tv and pso ("partially specified +// object") together. Of note: +// * No fields can be removed by this operation. +// * If both tv and pso specify a given leaf field, the result will keep pso's +// value. +// * Container typed elements will have their items ordered: +// * like tv, if pso doesn't change anything in the container +// * like pso, if pso does change something in the container. +// tv and pso must both be of the same type (their Schema and TypeRef must +// match), or an error will be returned. Validation errors will be returned if +// the objects don't conform to the schema. +func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { + return merge(&tv, pso, ruleKeepRHS, nil) +} + +// Compare compares the two objects. See the comments on the `Comparison` +// struct for details on the return value. +// +// tv and rhs must both be of the same type (their Schema and TypeRef must +// match), or an error will be returned. Validation errors will be returned if +// the objects don't conform to the schema. +func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { + c = &Comparison{ + Removed: fieldpath.NewSet(), + Modified: fieldpath.NewSet(), + Added: fieldpath.NewSet(), + } + _, err = merge(&tv, rhs, func(w *mergingWalker) { + if w.lhs == nil { + c.Added.Insert(w.path) + } else if w.rhs == nil { + c.Removed.Insert(w.path) + } else if !value.Equals(w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + c.Modified.Insert(w.path) + } + }, func(w *mergingWalker) { + if w.lhs == nil { + c.Added.Insert(w.path) + } else if w.rhs == nil { + c.Removed.Insert(w.path) + } + }) + if err != nil { + return nil, err + } + + return c, nil +} + +// RemoveItems removes each provided list or map item from the value. +func (tv TypedValue) RemoveItems(items *fieldpath.Set) *TypedValue { + tv.value = removeItemsWithSchema(tv.value, items, tv.schema, tv.typeRef) + return &tv +} + +// NormalizeUnions takes the new object and normalizes the union: +// - If discriminator changed to non-nil, and a new field has been added +// that doesn't match, an error is returned, +// - If discriminator hasn't changed and two fields or more are set, an +// error is returned, +// - If discriminator changed to non-nil, all other fields but the +// discriminated one will be cleared, +// - Otherwise, If only one field is left, update discriminator to that value. +// +// Please note: union behavior isn't finalized yet and this is still experimental. +func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { + var errs ValidationErrors + var normalizeFn = func(w *mergingWalker) { + if w.rhs != nil { + v := w.rhs.Unstructured() + w.out = &v + } + if err := normalizeUnions(w); err != nil { + errs = append(errs, errorf(err.Error())...) + } + } + out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) + if mergeErrs != nil { + errs = append(errs, mergeErrs.(ValidationErrors)...) + } + if len(errs) > 0 { + return nil, errs + } + return out, nil +} + +// NormalizeUnionsApply specifically normalize unions on apply. It +// validates that the applied union is correct (there should be no +// ambiguity there), and clear the fields according to the sent intent. 
+// +// Please note: union behavior isn't finalized yet and this is still experimental. +func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { + var errs ValidationErrors + var normalizeFn = func(w *mergingWalker) { + if w.rhs != nil { + v := w.rhs.Unstructured() + w.out = &v + } + if err := normalizeUnionsApply(w); err != nil { + errs = append(errs, errorf(err.Error())...) + } + } + out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) + if mergeErrs != nil { + errs = append(errs, mergeErrs.(ValidationErrors)...) + } + if len(errs) > 0 { + return nil, errs + } + return out, nil +} + +func (tv TypedValue) Empty() *TypedValue { + tv.value = value.NewValueInterface(nil) + return &tv +} + +var mwPool = sync.Pool{ + New: func() interface{} { return &mergingWalker{} }, +} + +func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) { + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + mw := mwPool.Get().(*mergingWalker) + defer func() { + mw.lhs = nil + mw.rhs = nil + mw.schema = nil + mw.typeRef = schema.TypeRef{} + mw.rule = nil + mw.postItemHook = nil + mw.out = nil + mw.inLeaf = false + + mwPool.Put(mw) + }() + + mw.lhs = lhs.value + mw.rhs = rhs.value + mw.schema = lhs.schema + mw.typeRef = lhs.typeRef + mw.rule = rule + mw.postItemHook = postRule + if mw.allocator == nil { + mw.allocator = value.NewFreelistAllocator() + } + + errs := mw.merge(nil) + if len(errs) > 0 { + return nil, errs + } + + out := &TypedValue{ + schema: lhs.schema, + typeRef: lhs.typeRef, + } + if mw.out != nil { + out.value = value.NewValueInterface(*mw.out) + } + return out, nil +} + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). +func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. 
+func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go new file mode 100644 index 0000000000..1fa5d88ae6 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go @@ -0,0 +1,276 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +func normalizeUnions(w *mergingWalker) error { + atom, found := w.schema.Resolve(w.typeRef) + if !found { + panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) + } + // Unions can only be in structures, and the struct must not have been removed + if atom.Map == nil || w.out == nil { + return nil + } + + var old value.Map + if w.lhs != nil && !w.lhs.IsNull() { + old = w.lhs.AsMap() + } + for _, union := range atom.Map.Unions { + if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { + return err + } + } + return nil +} + +func normalizeUnionsApply(w *mergingWalker) error { + atom, found := w.schema.Resolve(w.typeRef) + if !found { + panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) + } + // Unions can only be in structures, and the struct must not have been removed + if atom.Map == nil || w.out == nil { + return nil + } + + var old value.Map + if w.lhs != nil && !w.lhs.IsNull() { + old = w.lhs.AsMap() + } + + for _, union := range atom.Map.Unions { + out := value.NewValueInterface(*w.out) + if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { + return err + } + *w.out = out.Unstructured() + } + return nil +} + +type discriminated string +type field string + +type discriminatedNames struct { + f2d map[field]discriminated + d2f map[discriminated]field +} + +func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { + d2f := map[discriminated]field{} + for key, value := range f2d { + d2f[value] = key + } + return discriminatedNames{ + f2d: f2d, + d2f: d2f, + } +} + +func (dn discriminatedNames) toField(d discriminated) field { + if f, 
ok := dn.d2f[d]; ok { + return f + } + return field(d) +} + +func (dn discriminatedNames) toDiscriminated(f field) discriminated { + if d, ok := dn.f2d[f]; ok { + return d + } + return discriminated(f) +} + +type discriminator struct { + name string +} + +func (d *discriminator) Set(m value.Map, v discriminated) { + if d == nil { + return + } + m.Set(d.name, value.NewValueInterface(string(v))) +} + +func (d *discriminator) Get(m value.Map) discriminated { + if d == nil || m == nil { + return "" + } + val, ok := m.Get(d.name) + if !ok { + return "" + } + if !val.IsString() { + return "" + } + return discriminated(val.AsString()) +} + +type fieldsSet map[field]struct{} + +// newFieldsSet returns a map of the fields that are part of the union and are set +// in the given map. +func newFieldsSet(m value.Map, fields []field) fieldsSet { + if m == nil { + return nil + } + set := fieldsSet{} + for _, f := range fields { + if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { + set.Add(f) + } + } + return set +} + +func (fs fieldsSet) Add(f field) { + if fs == nil { + fs = map[field]struct{}{} + } + fs[f] = struct{}{} +} + +func (fs fieldsSet) One() *field { + for f := range fs { + return &f + } + return nil +} + +func (fs fieldsSet) Has(f field) bool { + _, ok := fs[f] + return ok +} + +func (fs fieldsSet) List() []field { + fields := []field{} + for f := range fs { + fields = append(fields, f) + } + return fields +} + +func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { + n := fieldsSet{} + for f := range fs { + if !o.Has(f) { + n.Add(f) + } + } + return n +} + +func (fs fieldsSet) String() string { + s := []string{} + for k := range fs { + s = append(s, string(k)) + } + return strings.Join(s, ", ") +} + +type union struct { + deduceInvalidDiscriminator bool + d *discriminator + dn discriminatedNames + f []field +} + +func newUnion(su *schema.Union) *union { + u := &union{} + if su.Discriminator != nil { + u.d = &discriminator{name: *su.Discriminator} + } + f2d := map[field]discriminated{} + for _, f := range su.Fields { + u.f = append(u.f, field(f.FieldName)) + f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) + } + u.dn = newDiscriminatedName(f2d) + u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator + return u +} + +// clear removes all the fields in map that are part of the union, but +// the one we decided to keep. +func (u *union) clear(m value.Map, f field) { + for _, fieldName := range u.f { + if field(fieldName) != f { + m.Delete(string(fieldName)) + } + } +} + +func (u *union) Normalize(old, new, out value.Map) error { + os := newFieldsSet(old, u.f) + ns := newFieldsSet(new, u.f) + diff := ns.Difference(os) + + if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { + if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { + return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) + } + if len(diff) > 1 { + return fmt.Errorf("multiple new fields added: %v", diff) + } + u.clear(out, u.dn.toField(u.d.Get(new))) + return nil + } + + if len(ns) > 1 { + return fmt.Errorf("multiple fields set without discriminator change: %v", ns) + } + + // Set discriminiator if it needs to be deduced. 
+ if u.deduceInvalidDiscriminator && len(ns) == 1 { + u.d.Set(out, u.dn.toDiscriminated(*ns.One())) + } + + return nil +} + +func (u *union) NormalizeApply(applied, merged, out value.Map) error { + as := newFieldsSet(applied, u.f) + if len(as) > 1 { + return fmt.Errorf("more than one field of union applied: %v", as) + } + if len(as) == 0 { + // None is set, just leave. + return nil + } + // We have exactly one, discriminiator must match if set + if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { + return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) + } + + // Update discriminiator if needed + if u.deduceInvalidDiscriminator { + u.d.Set(out, u.dn.toDiscriminated(*as.One())) + } + // Clear others fields. + u.clear(out, *as.One()) + + return nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go new file mode 100644 index 0000000000..403ec8fe00 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -0,0 +1,195 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +var vPool = sync.Pool{ + New: func() interface{} { return &validatingObjectWalker{} }, +} + +func (tv TypedValue) walker() *validatingObjectWalker { + v := vPool.Get().(*validatingObjectWalker) + v.value = tv.value + v.schema = tv.schema + v.typeRef = tv.typeRef + if v.allocator == nil { + v.allocator = value.NewFreelistAllocator() + } + return v +} + +func (v *validatingObjectWalker) finished() { + v.schema = nil + v.typeRef = schema.TypeRef{} + vPool.Put(v) +} + +type validatingObjectWalker struct { + value value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*validatingObjectWalker + allocator value.Allocator +} + +func (v *validatingObjectWalker) prepareDescent(tr schema.TypeRef) *validatingObjectWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*validatingObjectWalker{} + } + var v2 *validatingObjectWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &validatingObjectWalker{} + } + *v2 = *v + v2.typeRef = tr + return v2 +} + +func (v *validatingObjectWalker) finishDescent(v2 *validatingObjectWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. 
+ *v.spareWalkers = append(*v.spareWalkers, v2) +} + +func (v *validatingObjectWalker) validate(prefixFn func() string) ValidationErrors { + return resolveSchema(v.schema, v.typeRef, v.value, v).WithLazyPrefix(prefixFn) +} + +func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs ValidationErrors) { + if v == nil { + return nil + } + if v.IsNull() { + return nil + } + switch *t { + case schema.Numeric: + if !v.IsFloat() && !v.IsInt() { + // TODO: should the schema separate int and float? + return errorf("%vexpected numeric (int or float), got %T", prefix, v) + } + case schema.String: + if !v.IsString() { + return errorf("%vexpected string, got %#v", prefix, v) + } + case schema.Boolean: + if !v.IsBool() { + return errorf("%vexpected boolean, got %v", prefix, v) + } + } + return nil +} + +func (v *validatingObjectWalker) doScalar(t *schema.Scalar) ValidationErrors { + if errs := validateScalar(t, v.value, ""); len(errs) > 0 { + return errs + } + return nil +} + +func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + observedKeys := fieldpath.MakePathElementSet(list.Length()) + for i := 0; i < list.Length(); i++ { + child := list.AtUsing(v.allocator, i) + defer v.allocator.Free(child) + var pe fieldpath.PathElement + if t.ElementRelationship != schema.Associative { + pe.Index = &i + } else { + var err error + pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + return + } + if observedKeys.Has(pe) { + errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) + } + observedKeys.Insert(pe) + } + v2 := v.prepareDescent(t.ElementType) + v2.value = child + errs = append(errs, v2.validate(pe.String)...) + v.finishDescent(v2) + } + return errs +} + +func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) { + list, err := listValue(v.allocator, v.value) + if err != nil { + return errorf(err.Error()) + } + + if list == nil { + return nil + } + + defer v.allocator.Free(list) + errs = v.visitListItems(t, list) + + return errs +} + +func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) { + m.IterateUsing(v.allocator, func(key string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &key} + tr := t.ElementType + if sf, ok := t.FindField(key); ok { + tr = sf.Type + } else if (t.ElementType == schema.TypeRef{}) { + errs = append(errs, errorf("field not declared in schema").WithPrefix(pe.String())...) + return false + } + v2 := v.prepareDescent(tr) + v2.value = val + // Giving pe.String as a parameter actually increases the allocations. + errs = append(errs, v2.validate(func() string { return pe.String() })...) + v.finishDescent(v2) + return true + }) + return errs +} + +func (v *validatingObjectWalker) doMap(t *schema.Map) (errs ValidationErrors) { + m, err := mapValue(v.allocator, v.value) + if err != nil { + return errorf(err.Error()) + } + if m == nil { + return nil + } + defer v.allocator.Free(m) + errs = v.visitMapItems(t, m) + + return errs +}
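
For reviewers unfamiliar with the newly vendored typed package, the sketch below shows how the APIs added in typed.go above (Validate, Compare, Merge, ToFieldSet) compose. It is a minimal illustration and not part of the vendored code: it assumes the package's YAML parser helpers (typed.NewParser and ParseableType.FromYAML, defined in the module's parser.go, which is not part of the hunks above) and a made-up two-field schema; the type and field names are hypothetical.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// A toy schema with a single struct type; "widget", "name" and "replicas"
// are illustrative names only.
const schemaYAML = `types:
- name: widget
  map:
    fields:
    - name: name
      type:
        scalar: string
    - name: replicas
      type:
        scalar: numeric
`

func main() {
	// NewParser validates the schema and returns a parser bound to it.
	parser, err := typed.NewParser(typed.YAMLObject(schemaYAML))
	if err != nil {
		panic(err)
	}
	widget := parser.Type("widget")

	// FromYAML validates each object against the schema (the Validate
	// walker above) and wraps it in a TypedValue.
	lhs, err := widget.FromYAML(`{"name": "a", "replicas": 1}`)
	if err != nil {
		panic(err)
	}
	rhs, err := widget.FromYAML(`{"name": "a", "replicas": 2}`)
	if err != nil {
		panic(err)
	}

	// Compare reports added, removed and modified leaf fields; here only
	// "replicas" differs, so it shows up in the Modified set.
	cmp, err := lhs.Compare(rhs)
	if err != nil {
		panic(err)
	}
	fmt.Print(cmp.String())

	// Merge keeps the rhs value for leaves that are set on both sides.
	merged, err := lhs.Merge(rhs)
	if err != nil {
		panic(err)
	}

	// ToFieldSet lists every leaf field set on the merged object
	// (the walker in tofieldset.go above).
	set, err := merged.ToFieldSet()
	if err != nil {
		panic(err)
	}
	fmt.Println(set)
}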